From f2b5d08f008758ea52debf5b55c70bb47b490c07 Mon Sep 17 00:00:00 2001
From: jm96441n
Date: Wed, 21 Feb 2024 18:30:15 +0000
Subject: [PATCH] backport of commit 46b5465a4175cea55634348a88915cdba570cae6

---
 .changelog/17107.txt | 3 -
 .changelog/17155.txt | 3 -
 .changelog/17481.txt | 3 -
 .changelog/17593.txt | 3 -
 .changelog/17694.txt | 3 -
 .changelog/17754.txt | 4 +-
 .changelog/17831.txt | 3 -
 .changelog/17936.txt | 3 -
 .changelog/18007.txt | 3 -
 .changelog/18300.txt | 3 -
 .changelog/18303.txt | 3 +
 .changelog/18324.txt | 3 -
 .changelog/18336.txt | 7 -
 .changelog/18367.txt | 3 -
 .changelog/18439.txt | 3 -
 .changelog/18504.txt | 3 -
 .changelog/18560.txt | 3 -
 .changelog/18573.txt | 3 -
 .changelog/18583.txt | 3 -
 .changelog/18646.txt | 3 -
 .changelog/18668.txt | 3 -
 .changelog/18708.txt | 7 -
 .changelog/18719.txt | 7 -
 .changelog/18769.txt | 3 -
 .changelog/18813.txt | 3 -
 .changelog/18816.txt | 3 -
 .changelog/18943.txt | 3 -
 .changelog/18983.txt | 3 -
 .changelog/18994.txt | 20 -
 .changelog/19077.txt | 3 -
 .changelog/19120.txt | 3 +
 .changelog/19218.txt | 3 -
 .changelog/19273.txt | 3 +
 .changelog/19306.txt | 3 -
 .changelog/19311.txt | 3 -
 .changelog/19314.txt | 3 -
 .changelog/19342.txt | 3 -
 .changelog/19389.txt | 3 -
 .changelog/19443.txt | 3 +
 .changelog/19499.txt | 3 -
 .changelog/19549.txt | 3 -
 .changelog/19586.txt | 3 -
 .changelog/19594.txt | 3 -
 .changelog/19647.txt | 3 -
 .changelog/19666.txt | 3 -
 .changelog/19728.txt | 3 -
 .changelog/19735.txt | 3 -
 .changelog/19821.txt | 3 -
 .changelog/19827.txt | 3 -
 .changelog/19879.txt | 3 -
 .changelog/19907.txt | 3 -
 .changelog/19940.txt | 2 +-
 .changelog/19943.txt | 3 -
 .changelog/19992.txt | 3 -
 .changelog/20010.txt | 2 +-
 .changelog/20011.txt | 2 +-
 .changelog/20012.txt | 2 +-
 .changelog/20013.txt | 3 -
 .changelog/20015.txt | 3 -
 .changelog/20023.txt | 3 -
 .changelog/20078.txt | 3 -
 .changelog/20111.txt | 3 -
 .changelog/20220.txt | 3 -
 .changelog/20275.txt | 3 -
 .changelog/20299.txt | 3 -
 .changelog/20308.txt | 3 -
 .changelog/20312.txt | 6 -
 .changelog/20331.txt | 3 -
 .changelog/20352.txt | 3 -
 .changelog/20353.txt | 3 -
 .changelog/20359.txt | 3 -
 .changelog/20474.txt | 3 -
 .changelog/20514.txt | 3 -
 .changelog/20544.txt | 3 -
 .changelog/20586.txt | 3 +
 .changelog/20589.txt | 3 -
 .changelog/20642.txt | 7 -
 .changelog/20643.txt | 7 -
 .changelog/20679.txt | 3 -
 .changelog/_18366.txt | 3 -
 .changelog/_18422.txt | 3 -
 .changelog/_20721.txt | 3 +
 .changelog/_6074.txt | 3 -
 .changelog/_6870.txt | 3 -
 .copywrite.hcl | 6 +-
 .github/ISSUE_TEMPLATE/config.yml | 2 +-
 .github/dependabot.yml | 2 +-
 .github/pr-labeler.yml | 2 +-
 .github/scripts/changelog_checker.sh | 2 +-
 .github/scripts/get_runner_classes.sh | 2 +-
 .github/scripts/get_runner_classes_windows.sh | 26 -
 .github/scripts/metrics_checker.sh | 2 +-
 .github/scripts/notify_slack.sh | 30 +
 .github/scripts/rerun_fails_report.sh | 2 +-
 .github/scripts/set_test_package_matrix.sh | 12 +-
 .github/scripts/verify_artifact.sh | 2 +-
 .github/scripts/verify_bin.sh | 2 +-
 .github/scripts/verify_deb.sh | 2 +-
 .github/scripts/verify_docker.sh | 2 +-
 .github/scripts/verify_envoy_version.sh | 11 +-
 .github/scripts/verify_rpm.sh | 2 +-
 .github/workflows/backport-assistant.yml | 2 +-
 .github/workflows/bot-auto-approve.yaml | 2 +-
 .github/workflows/broken-link-check.yml | 10 +-
 .github/workflows/build-artifacts.yml | 14 +-
 .github/workflows/build-distros.yml | 14 +-
 .github/workflows/build.yml | 61 +-
 .github/workflows/changelog-checker.yml | 2 +-
 .github/workflows/copywrite.hcl | 24 +
.github/workflows/embedded-asset-checker.yml | 2 +- .github/workflows/frontend.yml | 74 +- .github/workflows/go-tests.yml | 380 +- .github/workflows/issue-comment-created.yml | 4 +- .github/workflows/jira-issues.yaml | 14 +- .github/workflows/jira-pr.yaml | 14 +- ...t-1.16.x.yaml => nightly-test-1.12.x.yaml} | 45 +- ...t-1.17.x.yaml => nightly-test-1.13.x.yaml} | 57 +- .github/workflows/nightly-test-1.14.x.yaml | 43 +- .github/workflows/nightly-test-1.15.x.yaml | 43 +- .../nightly-test-integrations-1.15.x.yml | 15 +- .../nightly-test-integrations-1.16.x.yml | 13 +- .../nightly-test-integrations-1.17.x.yml | 4 +- .../workflows/nightly-test-integrations.yml | 136 +- .github/workflows/nightly-test-main.yaml | 55 +- .github/workflows/pr-labeler.yml | 16 +- .github/workflows/pr-metrics-test-checker.yml | 2 +- .github/workflows/reusable-check-go-mod.yml | 7 +- .../workflows/reusable-dev-build-windows.yml | 3 + .github/workflows/reusable-dev-build.yml | 17 +- .github/workflows/reusable-lint.yml | 14 +- .github/workflows/reusable-unit-split.yml | 33 +- .github/workflows/reusable-unit.yml | 24 +- .github/workflows/stale.yml | 2 +- .../workflows/test-integrations-windows.yml | 4 +- .github/workflows/test-integrations.yml | 145 +- .github/workflows/verify-envoy-version.yml | 6 +- .gitignore | 3 - .golangci.yml | 7 +- .grpcmocks.yaml | 21 - .pre-commit-config.yaml | 54 - .release/ci.hcl | 2 +- .release/docker/docker-entrypoint-ubi.sh | 2 +- .release/docker/docker-entrypoint-windows.sh | 85 - .release/docker/docker-entrypoint.sh | 2 +- .../linux/package/etc/consul.d/consul.hcl | 2 +- .release/release-metadata.hcl | 2 +- .release/security-scan.hcl | 2 +- CHANGELOG.md | 645 +- Dockerfile | 4 +- Dockerfile-windows | 51 - Makefile => GNUmakefile | 533 +- LICENSE | 2 +- NOTICE.md | 3 + README.md | 6 +- acl/MockAuthorizer.go | 45 +- acl/acl.go | 2 +- acl/acl_ce.go | 3 +- acl/acl_test.go | 225 +- acl/authorizer.go | 99 +- acl/authorizer_ce.go | 3 +- acl/authorizer_test.go | 30 +- acl/chained_authorizer.go | 53 +- acl/chained_authorizer_test.go | 23 +- acl/enterprisemeta_ce.go | 3 +- acl/errors.go | 2 +- acl/errors_ce.go | 3 +- acl/errors_test.go | 2 +- acl/policy.go | 41 +- acl/policy_authorizer.go | 181 +- acl/policy_authorizer_ce.go | 3 +- acl/policy_authorizer_test.go | 268 +- acl/policy_ce.go | 3 +- acl/policy_merger.go | 52 +- acl/policy_merger_ce.go | 3 +- acl/policy_test.go | 178 +- acl/resolver/danger.go | 10 +- acl/resolver/result.go | 2 +- acl/static_authorizer.go | 51 +- acl/static_authorizer_test.go | 2 +- acl/testing.go | 2 +- acl/validation.go | 6 +- acl/validation_test.go | 2 +- agent/acl.go | 2 +- agent/acl_ce.go | 3 +- agent/acl_endpoint.go | 145 +- agent/acl_endpoint_test.go | 144 +- agent/acl_test.go | 6 +- agent/ae/ae.go | 29 +- agent/ae/ae_test.go | 2 +- agent/ae/trigger.go | 2 +- agent/agent.go | 305 +- agent/agent_ce.go | 3 +- agent/agent_ce_test.go | 3 +- agent/agent_endpoint.go | 124 +- agent/agent_endpoint_ce.go | 3 +- agent/agent_endpoint_ce_test.go | 3 +- agent/agent_endpoint_test.go | 237 +- agent/agent_test.go | 62 +- agent/apiserver.go | 2 +- agent/apiserver_test.go | 2 +- agent/auto-config/auto_config.go | 2 +- agent/auto-config/auto_config_ce.go | 3 +- agent/auto-config/auto_config_ce_test.go | 3 +- agent/auto-config/auto_config_test.go | 2 +- agent/auto-config/auto_encrypt.go | 2 +- agent/auto-config/auto_encrypt_test.go | 2 +- agent/auto-config/config.go | 2 +- agent/auto-config/config_ce.go | 3 +- agent/auto-config/config_translate.go | 2 +- 
agent/auto-config/config_translate_test.go | 2 +- agent/auto-config/mock_ce_test.go | 3 +- agent/auto-config/mock_test.go | 2 +- agent/auto-config/persist.go | 2 +- agent/auto-config/run.go | 2 +- agent/auto-config/server_addr.go | 2 +- agent/auto-config/tls.go | 2 +- agent/auto-config/tls_test.go | 2 +- agent/cache-types/catalog_datacenters.go | 2 +- agent/cache-types/catalog_datacenters_test.go | 2 +- agent/cache-types/catalog_list_services.go | 2 +- .../cache-types/catalog_list_services_test.go | 2 +- agent/cache-types/catalog_service_list.go | 2 +- .../cache-types/catalog_service_list_test.go | 2 +- agent/cache-types/catalog_services.go | 2 +- agent/cache-types/catalog_services_test.go | 2 +- agent/cache-types/config_entry.go | 2 +- agent/cache-types/config_entry_test.go | 2 +- agent/cache-types/connect_ca_root.go | 5 +- agent/cache-types/connect_ca_root_test.go | 2 +- agent/cache-types/discovery_chain.go | 2 +- agent/cache-types/discovery_chain_test.go | 2 +- agent/cache-types/exported_peered_services.go | 2 +- .../exported_peered_services_test.go | 2 +- .../federation_state_list_gateways.go | 2 +- .../federation_state_list_gateways_test.go | 2 +- agent/cache-types/gateway_services.go | 2 +- agent/cache-types/gateway_services_test.go | 2 +- agent/cache-types/health_services.go | 2 +- agent/cache-types/health_services_test.go | 2 +- agent/cache-types/intention_match.go | 2 +- agent/cache-types/intention_match_test.go | 2 +- agent/cache-types/intention_upstreams.go | 2 +- .../intention_upstreams_destination.go | 2 +- .../intention_upstreams_destination_test.go | 2 +- agent/cache-types/intention_upstreams_test.go | 2 +- agent/cache-types/node_services.go | 2 +- agent/cache-types/node_services_test.go | 2 +- agent/cache-types/options.go | 2 +- agent/cache-types/peered_upstreams.go | 2 +- agent/cache-types/peered_upstreams_test.go | 2 +- agent/cache-types/peerings.go | 2 +- agent/cache-types/peerings_test.go | 2 +- agent/cache-types/prepared_query.go | 2 +- agent/cache-types/prepared_query_test.go | 2 +- agent/cache-types/resolved_service_config.go | 2 +- .../resolved_service_config_test.go | 2 +- agent/cache-types/rpc.go | 2 +- agent/cache-types/service_checks.go | 2 +- agent/cache-types/service_checks_test.go | 2 +- agent/cache-types/service_dump.go | 2 +- agent/cache-types/service_dump_test.go | 2 +- agent/cache-types/service_gateways.go | 2 +- agent/cache-types/service_gateways_test.go | 2 +- agent/cache-types/testing.go | 2 +- agent/cache-types/trust_bundle.go | 2 +- agent/cache-types/trust_bundle_test.go | 2 +- agent/cache-types/trust_bundles.go | 2 +- agent/cache-types/trust_bundles_test.go | 2 +- agent/cache/cache.go | 30 +- agent/cache/cache_test.go | 2 +- agent/cache/entry.go | 2 +- agent/cache/request.go | 60 +- agent/cache/testing.go | 2 +- agent/cache/type.go | 2 +- agent/cache/watch.go | 25 +- agent/cache/watch_test.go | 2 +- agent/cacheshim/cache.go | 118 - agent/catalog_endpoint.go | 11 +- agent/catalog_endpoint_ce.go | 3 +- agent/catalog_endpoint_test.go | 54 +- agent/check.go | 2 +- agent/checks/alias.go | 2 +- agent/checks/alias_test.go | 2 +- agent/checks/check.go | 2 +- agent/checks/check_test.go | 2 +- agent/checks/check_windows_test.go | 3 +- agent/checks/docker.go | 2 +- agent/checks/docker_unix.go | 3 +- agent/checks/docker_windows.go | 2 +- agent/checks/grpc.go | 2 +- agent/checks/grpc_test.go | 2 +- agent/checks/os_service.go | 2 +- agent/checks/os_service_unix.go | 3 +- agent/checks/os_service_windows.go | 3 +- agent/config/agent_limits.go | 2 +- 
agent/config/builder.go | 146 +- agent/config/builder_ce.go | 3 +- agent/config/builder_ce_test.go | 3 +- agent/config/builder_test.go | 240 +- agent/config/config.deepcopy.go | 3 - agent/config/config.go | 5 +- agent/config/config_ce.go | 3 +- agent/config/deep-copy.sh | 1 + agent/config/default.go | 7 +- agent/config/default_ce.go | 3 +- agent/config/deprecated.go | 2 +- agent/config/deprecated_test.go | 2 +- agent/config/doc.go | 2 +- agent/config/file_watcher.go | 2 +- agent/config/file_watcher_test.go | 2 +- agent/config/flags.go | 2 +- agent/config/flags_test.go | 2 +- agent/config/flagset.go | 2 +- agent/config/golden_test.go | 2 +- agent/config/limits.go | 3 +- agent/config/limits_windows.go | 3 +- agent/config/merge.go | 2 +- agent/config/merge_test.go | 2 +- agent/config/ratelimited_file_watcher.go | 2 +- agent/config/ratelimited_file_watcher_test.go | 2 +- agent/config/runtime.go | 16 +- agent/config/runtime_ce.go | 3 +- agent/config/runtime_ce_test.go | 3 +- agent/config/runtime_test.go | 161 +- agent/config/segment_ce.go | 3 +- agent/config/segment_ce_test.go | 3 +- .../TestRuntimeConfig_Sanitize.golden | 8 +- agent/config/testdata/full-config.hcl | 14 +- agent/config/testdata/full-config.json | 12 - agent/config_endpoint.go | 49 +- agent/config_endpoint_test.go | 103 +- agent/configentry/config_entry.go | 2 +- agent/configentry/discoverychain.go | 2 +- agent/configentry/doc.go | 2 +- agent/configentry/merge_service_config.go | 32 +- .../configentry/merge_service_config_test.go | 110 +- agent/configentry/resolve.go | 5 +- agent/configentry/resolve_test.go | 2 +- agent/configentry/service_config.go | 2 +- agent/connect/authz.go | 2 +- agent/connect/authz_test.go | 2 +- agent/connect/ca/common.go | 2 +- agent/connect/ca/provider.go | 4 +- agent/connect/ca/provider_aws.go | 2 +- agent/connect/ca/provider_aws_test.go | 2 +- agent/connect/ca/provider_consul.go | 2 +- agent/connect/ca/provider_consul_config.go | 2 +- agent/connect/ca/provider_consul_test.go | 2 +- agent/connect/ca/provider_test.go | 4 +- agent/connect/ca/provider_vault.go | 16 +- agent/connect/ca/provider_vault_auth.go | 2 +- .../ca/provider_vault_auth_alicloud.go | 2 +- .../connect/ca/provider_vault_auth_approle.go | 2 +- agent/connect/ca/provider_vault_auth_aws.go | 2 +- agent/connect/ca/provider_vault_auth_azure.go | 2 +- agent/connect/ca/provider_vault_auth_gcp.go | 2 +- agent/connect/ca/provider_vault_auth_jwt.go | 2 +- agent/connect/ca/provider_vault_auth_k8s.go | 2 +- agent/connect/ca/provider_vault_auth_test.go | 2 +- agent/connect/ca/provider_vault_test.go | 36 +- agent/connect/ca/testing.go | 6 +- agent/connect/common_names.go | 2 +- agent/connect/csr.go | 2 +- agent/connect/csr_test.go | 2 +- agent/connect/generate.go | 4 +- agent/connect/generate_test.go | 2 +- agent/connect/parsing.go | 4 +- agent/connect/sni.go | 2 +- agent/connect/sni_test.go | 2 +- agent/connect/testing_ca.go | 2 +- agent/connect/testing_ca_test.go | 2 +- agent/connect/testing_spiffe.go | 12 +- agent/connect/uri.go | 30 +- agent/connect/uri_agent.go | 2 +- agent/connect/uri_agent_ce.go | 3 +- agent/connect/uri_agent_ce_test.go | 3 +- agent/connect/uri_mesh_gateway.go | 2 +- agent/connect/uri_mesh_gateway_ce.go | 3 +- agent/connect/uri_mesh_gateway_ce_test.go | 3 +- agent/connect/uri_server.go | 2 +- agent/connect/uri_service.go | 14 +- agent/connect/uri_service_ce.go | 3 +- agent/connect/uri_service_ce_test.go | 3 +- agent/connect/uri_signing.go | 12 +- agent/connect/uri_signing_test.go | 26 +- agent/connect/uri_test.go | 57 +- 
agent/connect/uri_workload_identity.go | 40 - agent/connect/uri_workload_identity_ce.go | 18 - agent/connect/uri_workload_identity_test.go | 31 - agent/connect/x509_patch.go | 2 +- agent/connect/x509_patch_test.go | 2 +- agent/connect_auth.go | 143 + agent/connect_ca_endpoint.go | 2 +- agent/connect_ca_endpoint_test.go | 2 +- agent/consul/acl.go | 59 +- agent/consul/acl_authmethod.go | 2 +- agent/consul/acl_authmethod_ce.go | 3 +- agent/consul/acl_ce.go | 3 +- agent/consul/acl_ce_test.go | 3 +- agent/consul/acl_client.go | 2 +- agent/consul/acl_endpoint.go | 60 +- agent/consul/acl_endpoint_ce.go | 3 +- agent/consul/acl_endpoint_test.go | 193 +- agent/consul/acl_replication.go | 2 +- agent/consul/acl_replication_test.go | 2 +- agent/consul/acl_replication_types.go | 2 +- agent/consul/acl_server.go | 2 +- agent/consul/acl_server_ce.go | 3 +- agent/consul/acl_test.go | 89 +- agent/consul/acl_token_exp.go | 2 +- agent/consul/acl_token_exp_test.go | 2 +- agent/consul/auth/binder.go | 177 +- agent/consul/auth/binder_ce.go | 3 +- agent/consul/auth/binder_test.go | 197 +- agent/consul/auth/login.go | 4 +- agent/consul/auth/token_writer.go | 37 +- agent/consul/auth/token_writer_ce.go | 3 +- agent/consul/auth/token_writer_test.go | 56 +- agent/consul/authmethod/authmethods.go | 2 +- agent/consul/authmethod/authmethods_ce.go | 3 +- agent/consul/authmethod/awsauth/aws.go | 2 +- agent/consul/authmethod/awsauth/aws_test.go | 2 +- agent/consul/authmethod/kubeauth/k8s.go | 2 +- agent/consul/authmethod/kubeauth/k8s_ce.go | 3 +- agent/consul/authmethod/kubeauth/k8s_test.go | 2 +- agent/consul/authmethod/kubeauth/testing.go | 2 +- agent/consul/authmethod/ssoauth/sso.go | 2 +- agent/consul/authmethod/ssoauth/sso_ce.go | 3 +- agent/consul/authmethod/ssoauth/sso_test.go | 2 +- agent/consul/authmethod/testauth/testing.go | 2 +- .../consul/authmethod/testauth/testing_ce.go | 3 +- agent/consul/authmethod/testing.go | 2 +- agent/consul/auto_config_backend.go | 4 +- agent/consul/auto_config_backend_test.go | 2 +- agent/consul/auto_config_endpoint.go | 6 +- agent/consul/auto_config_endpoint_test.go | 2 +- agent/consul/auto_encrypt_endpoint.go | 2 +- agent/consul/auto_encrypt_endpoint_test.go | 2 +- agent/consul/autopilot.go | 2 +- agent/consul/autopilot_ce.go | 3 +- agent/consul/autopilot_test.go | 2 +- .../autopilotevents/ready_servers_events.go | 2 +- .../ready_servers_events_test.go | 2 +- agent/consul/catalog_endpoint.go | 2 +- agent/consul/catalog_endpoint_test.go | 2 +- agent/consul/client.go | 19 +- agent/consul/client_serf.go | 2 +- agent/consul/client_test.go | 7 +- agent/consul/cluster_test.go | 2 +- agent/consul/config.go | 39 +- agent/consul/config_ce.go | 3 +- .../consul/config_cloud.go | 6 +- agent/consul/config_endpoint.go | 2 +- agent/consul/config_endpoint_test.go | 2 +- agent/consul/config_replication.go | 2 +- agent/consul/config_replication_test.go | 2 +- agent/consul/config_test.go | 2 +- agent/consul/configentry_backend.go | 31 - agent/consul/configentry_backend_ce.go | 18 - agent/consul/configentry_backend_ce_test.go | 87 - agent/consul/configentry_backend_test.go | 49 - agent/consul/connect_ca_endpoint.go | 12 +- agent/consul/connect_ca_endpoint_test.go | 2 +- agent/consul/context.go | 2 +- agent/consul/context_test.go | 2 +- agent/consul/controller/controller.go | 91 +- agent/consul/controller/controller_test.go | 2 +- agent/consul/controller/doc.go | 2 +- agent/consul/controller/queue/defer.go | 18 +- agent/consul/controller/queue/queue.go | 20 +- agent/consul/controller/queue/rate.go | 2 +- 
agent/consul/controller/queue/rate_test.go | 2 +- agent/consul/controller/queue_test.go | 6 +- agent/consul/controller/reconciler.go | 2 +- agent/consul/controller/reconciler_test.go | 3 +- agent/consul/coordinate_endpoint.go | 2 +- agent/consul/coordinate_endpoint_test.go | 2 +- agent/consul/discovery_chain_endpoint.go | 2 +- agent/consul/discovery_chain_endpoint_test.go | 2 +- agent/consul/discoverychain/compile.go | 2 +- agent/consul/discoverychain/compile_ce.go | 3 +- agent/consul/discoverychain/compile_test.go | 5 +- agent/consul/discoverychain/gateway.go | 23 +- .../discoverychain/gateway_httproute.go | 51 +- .../consul/discoverychain/gateway_tcproute.go | 2 +- agent/consul/discoverychain/gateway_test.go | 108 +- agent/consul/discoverychain/string_stack.go | 2 +- .../discoverychain/string_stack_test.go | 2 +- agent/consul/discoverychain/testing.go | 2 +- agent/consul/enterprise_client_ce.go | 3 +- agent/consul/enterprise_config_ce.go | 3 +- agent/consul/enterprise_server_ce.go | 3 +- agent/consul/enterprise_server_ce_test.go | 8 +- agent/consul/federation_state_endpoint.go | 2 +- .../consul/federation_state_endpoint_test.go | 2 +- agent/consul/federation_state_replication.go | 2 +- .../federation_state_replication_test.go | 2 +- agent/consul/filter.go | 2 +- agent/consul/filter_test.go | 2 +- agent/consul/flood.go | 2 +- agent/consul/fsm/commands_ce.go | 91 +- agent/consul/fsm/commands_ce_test.go | 3 +- agent/consul/fsm/decode_ce.go | 145 - agent/consul/fsm/decode_downgrade.go | 1011 -- agent/consul/fsm/fsm.go | 19 +- agent/consul/fsm/fsm_test.go | 2 +- .../fsm/log_verification_chunking_shim.go | 2 +- agent/consul/fsm/snapshot.go | 2 +- agent/consul/fsm/snapshot_ce.go | 2 +- agent/consul/fsm/snapshot_ce_test.go | 3 +- agent/consul/fsm/snapshot_test.go | 3 +- agent/consul/gateway_locator.go | 2 +- agent/consul/gateway_locator_test.go | 2 +- agent/consul/gateways/controller_gateways.go | 107 +- .../consul/gateways/controller_gateways_ce.go | 26 - .../gateways/controller_gateways_test.go | 28 +- agent/consul/grpc_integration_test.go | 2 +- agent/consul/health_endpoint.go | 2 +- agent/consul/health_endpoint_test.go | 2 +- agent/consul/helper_test.go | 2 +- agent/consul/intention_endpoint.go | 18 +- agent/consul/intention_endpoint_test.go | 166 +- agent/consul/internal_endpoint.go | 8 +- agent/consul/internal_endpoint_test.go | 40 +- agent/consul/issue_test.go | 2 +- agent/consul/kvs_endpoint.go | 2 +- agent/consul/kvs_endpoint_test.go | 2 +- agent/consul/leader.go | 493 +- agent/consul/leader_ce.go | 17 - agent/consul/leader_ce_test.go | 55 +- agent/consul/leader_connect.go | 2 +- agent/consul/leader_connect_ca.go | 20 +- agent/consul/leader_connect_ca_test.go | 19 +- agent/consul/leader_connect_test.go | 2 +- agent/consul/leader_federation_state_ae.go | 2 +- .../consul/leader_federation_state_ae_test.go | 2 +- agent/consul/leader_intentions.go | 2 +- agent/consul/leader_intentions_ce.go | 3 +- agent/consul/leader_intentions_ce_test.go | 3 +- agent/consul/leader_intentions_test.go | 2 +- agent/consul/leader_log_verification.go | 2 +- agent/consul/leader_metrics.go | 2 +- agent/consul/leader_metrics_test.go | 2 +- agent/consul/leader_peering.go | 2 +- agent/consul/leader_peering_test.go | 6 +- agent/consul/leader_registrator_v1.go | 279 - agent/consul/leader_registrator_v1_test.go | 887 -- agent/consul/leader_registrator_v2.go | 407 - agent/consul/leader_registrator_v2_test.go | 583 - agent/consul/leader_test.go | 876 +- agent/consul/logging.go | 2 +- agent/consul/logging_test.go | 2 +- 
agent/consul/merge.go | 2 +- agent/consul/merge_ce.go | 3 +- agent/consul/merge_ce_test.go | 3 +- agent/consul/merge_test.go | 2 +- agent/consul/multilimiter/multilimiter.go | 2 +- .../consul/multilimiter/multilimiter_test.go | 2 +- agent/consul/operator_autopilot_endpoint.go | 2 +- .../operator_autopilot_endpoint_test.go | 2 +- agent/consul/operator_backend.go | 2 +- agent/consul/operator_backend_test.go | 5 +- agent/consul/operator_endpoint.go | 2 +- agent/consul/operator_raft_endpoint.go | 2 +- agent/consul/operator_raft_endpoint_test.go | 2 +- agent/consul/operator_usage_endpoint.go | 2 +- agent/consul/options.go | 44 +- agent/consul/options_ce.go | 3 +- agent/consul/peering_backend.go | 2 +- agent/consul/peering_backend_ce.go | 3 +- agent/consul/peering_backend_ce_test.go | 3 +- agent/consul/peering_backend_test.go | 2 +- agent/consul/prepared_query/template.go | 2 +- agent/consul/prepared_query/template_test.go | 2 +- agent/consul/prepared_query/walk.go | 2 +- agent/consul/prepared_query/walk_ce_test.go | 3 +- agent/consul/prepared_query/walk_test.go | 2 +- agent/consul/prepared_query_endpoint.go | 2 +- agent/consul/prepared_query_endpoint_ce.go | 3 +- .../consul/prepared_query_endpoint_ce_test.go | 3 +- agent/consul/prepared_query_endpoint_test.go | 4 +- agent/consul/raft_handle.go | 2 +- agent/consul/raft_rpc.go | 2 +- agent/consul/rate/handler.go | 2 +- agent/consul/rate/handler_ce.go | 3 +- agent/consul/rate/handler_test.go | 2 +- agent/consul/rate/metrics.go | 2 +- agent/consul/replication.go | 2 +- agent/consul/replication_test.go | 2 +- agent/consul/reporting/reporting.go | 2 +- agent/consul/reporting/reporting_ce.go | 3 +- agent/consul/rpc.go | 6 +- agent/consul/rpc_test.go | 2 +- agent/consul/rtt.go | 2 +- agent/consul/rtt_test.go | 2 +- agent/consul/segment_ce.go | 3 +- agent/consul/serf_filter.go | 2 +- agent/consul/serf_test.go | 2 +- agent/consul/server.go | 544 +- agent/consul/server_ce.go | 24 +- agent/consul/server_ce_test.go | 3 +- agent/consul/server_connect.go | 34 +- agent/consul/server_grpc.go | 560 - agent/consul/server_log_verification.go | 2 +- agent/consul/server_lookup.go | 2 +- agent/consul/server_lookup_test.go | 2 +- agent/consul/server_metadata.go | 2 +- agent/consul/server_metadata_test.go | 2 +- agent/consul/server_overview.go | 2 +- agent/consul/server_overview_test.go | 2 +- agent/consul/server_register.go | 2 +- agent/consul/server_serf.go | 2 +- agent/consul/server_test.go | 105 +- agent/consul/servercert/manager.go | 2 +- agent/consul/servercert/manager_test.go | 2 +- agent/consul/session_endpoint.go | 2 +- agent/consul/session_endpoint_test.go | 2 +- agent/consul/session_timers.go | 2 +- agent/consul/session_timers_test.go | 2 +- agent/consul/session_ttl.go | 2 +- agent/consul/session_ttl_test.go | 2 +- agent/consul/snapshot_endpoint.go | 2 +- agent/consul/snapshot_endpoint_test.go | 2 +- agent/consul/state/acl.go | 25 +- agent/consul/state/acl_ce.go | 3 +- agent/consul/state/acl_ce_test.go | 3 +- agent/consul/state/acl_events.go | 2 +- agent/consul/state/acl_events_test.go | 2 +- agent/consul/state/acl_schema.go | 14 +- agent/consul/state/acl_test.go | 83 +- agent/consul/state/autopilot.go | 2 +- agent/consul/state/autopilot_test.go | 2 +- agent/consul/state/catalog.go | 16 +- agent/consul/state/catalog_ce.go | 3 +- agent/consul/state/catalog_ce_test.go | 3 +- agent/consul/state/catalog_events.go | 4 +- agent/consul/state/catalog_events_ce.go | 3 +- agent/consul/state/catalog_events_ce_test.go | 3 +- agent/consul/state/catalog_events_test.go | 2 
+- agent/consul/state/catalog_schema.deepcopy.go | 3 - agent/consul/state/catalog_schema.go | 2 +- agent/consul/state/catalog_test.go | 2 +- agent/consul/state/config_entry.go | 2 +- agent/consul/state/config_entry_ce.go | 3 +- agent/consul/state/config_entry_ce_test.go | 3 +- agent/consul/state/config_entry_events.go | 9 +- .../consul/state/config_entry_events_test.go | 2 +- .../state/config_entry_exported_services.go | 110 +- .../config_entry_exported_services_ce.go | 32 +- .../config_entry_exported_services_ce_test.go | 317 - agent/consul/state/config_entry_intention.go | 2 +- .../consul/state/config_entry_intention_ce.go | 3 +- .../state/config_entry_sameness_group_ce.go | 3 +- .../config_entry_sameness_group_ce_test.go | 3 +- agent/consul/state/config_entry_schema.go | 2 +- agent/consul/state/config_entry_test.go | 2 +- agent/consul/state/connect_ca.go | 2 +- agent/consul/state/connect_ca_events.go | 2 +- agent/consul/state/connect_ca_events_test.go | 2 +- agent/consul/state/connect_ca_test.go | 2 +- agent/consul/state/coordinate.go | 2 +- agent/consul/state/coordinate_ce.go | 3 +- agent/consul/state/coordinate_ce_test.go | 3 +- agent/consul/state/coordinate_test.go | 2 +- agent/consul/state/delay_ce.go | 3 +- agent/consul/state/delay_test.go | 2 +- agent/consul/state/events.go | 4 +- agent/consul/state/events_test.go | 2 +- agent/consul/state/federation_state.go | 2 +- agent/consul/state/graveyard.go | 2 +- agent/consul/state/graveyard_ce.go | 3 +- agent/consul/state/graveyard_test.go | 2 +- agent/consul/state/index_connect_test.go | 2 +- agent/consul/state/indexer.go | 2 +- agent/consul/state/intention.go | 14 +- agent/consul/state/intention_ce.go | 3 +- agent/consul/state/intention_test.go | 104 +- agent/consul/state/kvs.go | 2 +- agent/consul/state/kvs_ce.go | 3 +- agent/consul/state/kvs_ce_test.go | 3 +- agent/consul/state/kvs_test.go | 2 +- agent/consul/state/memdb.go | 3 +- agent/consul/state/memdb_test.go | 2 +- agent/consul/state/operations_ce.go | 3 +- agent/consul/state/peering.go | 5 +- agent/consul/state/peering_ce.go | 3 +- agent/consul/state/peering_ce_test.go | 3 +- agent/consul/state/peering_test.go | 2 +- agent/consul/state/prepared_query.go | 2 +- agent/consul/state/prepared_query_index.go | 2 +- .../consul/state/prepared_query_index_test.go | 2 +- agent/consul/state/prepared_query_test.go | 2 +- agent/consul/state/query.go | 2 +- agent/consul/state/query_ce.go | 3 +- agent/consul/state/schema.go | 2 +- agent/consul/state/schema_ce.go | 3 +- agent/consul/state/schema_ce_test.go | 3 +- agent/consul/state/schema_test.go | 2 +- agent/consul/state/session.go | 2 +- agent/consul/state/session_ce.go | 3 +- agent/consul/state/session_test.go | 2 +- agent/consul/state/state_store.go | 2 +- agent/consul/state/state_store_ce_test.go | 3 +- agent/consul/state/state_store_test.go | 2 +- agent/consul/state/store_integration_test.go | 2 +- agent/consul/state/system_metadata.go | 2 +- agent/consul/state/system_metadata_test.go | 2 +- agent/consul/state/tombstone_gc.go | 2 +- agent/consul/state/tombstone_gc_test.go | 2 +- agent/consul/state/txn.go | 2 +- agent/consul/state/txn_test.go | 2 +- agent/consul/state/usage.go | 2 +- agent/consul/state/usage_ce.go | 3 +- agent/consul/state/usage_test.go | 2 +- agent/consul/stats_fetcher.go | 2 +- agent/consul/stats_fetcher_test.go | 2 +- agent/consul/status_endpoint.go | 2 +- agent/consul/status_endpoint_test.go | 2 +- agent/consul/stream/event.go | 2 +- agent/consul/stream/event_buffer.go | 2 +- agent/consul/stream/event_buffer_test.go | 
2 +- agent/consul/stream/event_publisher.go | 49 +- agent/consul/stream/event_publisher_test.go | 8 +- agent/consul/stream/event_snapshot.go | 2 +- agent/consul/stream/event_snapshot_test.go | 2 +- agent/consul/stream/event_test.go | 2 +- agent/consul/stream/noop.go | 2 +- agent/consul/stream/string_types.go | 2 +- agent/consul/stream/subscription.go | 2 +- agent/consul/stream/subscription_test.go | 2 +- agent/consul/subscribe_backend.go | 2 +- agent/consul/subscribe_backend_test.go | 2 +- agent/consul/system_metadata.go | 2 +- agent/consul/system_metadata_test.go | 2 +- agent/consul/tenancy_bridge.go | 17 - agent/consul/tenancy_bridge_ce.go | 28 - .../testdata/v2-resource-dependencies.md | 68 - agent/consul/txn_endpoint.go | 2 +- agent/consul/txn_endpoint_test.go | 2 +- agent/consul/type_registry.go | 36 - agent/consul/usagemetrics/usagemetrics.go | 2 +- agent/consul/usagemetrics/usagemetrics_ce.go | 3 +- .../usagemetrics/usagemetrics_ce_test.go | 3 +- .../consul/usagemetrics/usagemetrics_test.go | 2 +- agent/consul/util.go | 15 +- agent/consul/util_test.go | 2 +- agent/consul/v2_config_entry_exports_shim.go | 169 - .../v2_config_entry_exports_shim_test.go | 97 - agent/consul/wanfed/pool.go | 2 +- agent/consul/wanfed/wanfed.go | 2 +- agent/consul/wanfed/wanfed_test.go | 2 +- agent/consul/watch/server_local.go | 2 +- agent/consul/watch/server_local_test.go | 2 +- agent/consul/xdscapacity/capacity.go | 2 +- agent/consul/xdscapacity/capacity_test.go | 2 +- agent/coordinate_endpoint.go | 2 +- agent/coordinate_endpoint_test.go | 2 +- agent/debug/host.go | 2 +- agent/debug/host_test.go | 2 +- agent/delegate_mock_test.go | 7 +- agent/denylist.go | 2 +- agent/denylist_test.go | 2 +- agent/discovery/discovery.go | 251 - agent/discovery/discovery_test.go | 221 - agent/discovery/mock_CatalogDataFetcher.go | 209 - agent/discovery/query_fetcher_v1.go | 650 -- agent/discovery/query_fetcher_v1_ce.go | 38 - agent/discovery/query_fetcher_v1_ce_test.go | 11 - agent/discovery/query_fetcher_v1_test.go | 205 - agent/discovery/query_fetcher_v2.go | 359 - agent/discovery/query_fetcher_v2_test.go | 859 -- agent/discovery_chain_endpoint.go | 2 +- agent/discovery_chain_endpoint_test.go | 2 +- agent/dns.go | 225 +- agent/{structs => dns}/dns.go | 16 +- agent/dns/dns_address.go | 87 - agent/dns/dns_address_test.go | 168 - agent/{structs => dns}/dns_test.go | 4 +- agent/dns/mock_DNSRouter.go | 66 - agent/dns/mock_dnsRecursor.go | 55 - agent/dns/parser.go | 89 - agent/dns/recursor.go | 123 - agent/dns/recursor_test.go | 39 - agent/dns/router.go | 1458 --- agent/dns/router_query.go | 239 - agent/dns/router_query_test.go | 224 - agent/dns/router_response.go | 259 - agent/dns/router_service_test.go | 168 - agent/dns/router_test.go | 3464 ------ agent/dns/server.go | 104 - agent/dns/validation.go | 30 + agent/dns/validation_test.go | 53 + agent/dns_catalogv2_test.go | 515 - agent/dns_ce.go | 7 +- agent/dns_ce_test.go | 268 +- agent/dns_node_lookup_test.go | 704 -- agent/dns_reverse_lookup_test.go | 476 - agent/dns_service_lookup_test.go | 3893 ------- agent/dns_test.go | 9553 ++++++++++++----- agent/enterprise_delegate_ce.go | 3 +- .../builtin/aws-lambda/aws_lambda.go | 2 +- .../builtin/aws-lambda/aws_lambda_test.go | 2 +- .../builtin/ext-authz/ext_authz.go | 19 +- .../builtin/ext-authz/ext_authz_test.go | 2 +- .../builtin/ext-authz/structs.go | 5 +- agent/envoyextensions/builtin/lua/lua.go | 2 +- agent/envoyextensions/builtin/lua/lua_test.go | 2 +- .../otel_access_logging.go | 274 - .../otel_access_logging_test.go | 113 
- .../builtin/otel-access-logging/structs.go | 424 - agent/envoyextensions/builtin/wasm/structs.go | 2 +- agent/envoyextensions/builtin/wasm/wasm.go | 2 +- .../envoyextensions/builtin/wasm/wasm_test.go | 17 +- .../envoyextensions/registered_extensions.go | 24 +- .../registered_extensions_ce.go | 8 - .../registered_extensions_test.go | 2 +- agent/event_endpoint.go | 2 +- agent/event_endpoint_test.go | 4 +- agent/exec/exec.go | 2 +- agent/exec/exec_unix.go | 3 +- agent/exec/exec_windows.go | 3 +- agent/federation_state_endpoint.go | 2 +- agent/grpc-external/forward.go | 2 +- agent/grpc-external/limiter/limiter.go | 2 +- agent/grpc-external/limiter/limiter_test.go | 2 +- agent/grpc-external/options.go | 2 +- agent/grpc-external/options_test.go | 2 +- agent/grpc-external/server.go | 41 +- agent/grpc-external/services/acl/login.go | 2 +- .../grpc-external/services/acl/login_test.go | 2 +- agent/grpc-external/services/acl/logout.go | 2 +- .../grpc-external/services/acl/logout_test.go | 2 +- agent/grpc-external/services/acl/server.go | 6 +- .../grpc-external/services/acl/server_test.go | 2 +- .../services/configentry/server.go | 133 - .../services/configentry/server_ce_test.go | 83 - .../services/configentry/server_test.go | 236 - .../services/connectca/server.go | 6 +- .../services/connectca/server_test.go | 2 +- .../grpc-external/services/connectca/sign.go | 2 +- .../services/connectca/sign_test.go | 2 +- .../services/connectca/watch_roots.go | 2 +- .../services/connectca/watch_roots_test.go | 2 +- .../dataplane/get_envoy_bootstrap_params.go | 150 +- .../get_envoy_bootstrap_params_test.go | 268 +- .../dataplane/get_supported_features.go | 4 +- .../dataplane/get_supported_features_test.go | 2 +- .../services/dataplane/server.go | 11 +- .../services/dataplane/server_test.go | 2 +- agent/grpc-external/services/dns/server.go | 6 +- .../grpc-external/services/dns/server_test.go | 4 +- agent/grpc-external/services/dns/server_v2.go | 89 - .../services/dns/server_v2_test.go | 130 - .../services/peerstream/health_snapshot.go | 2 +- .../peerstream/health_snapshot_test.go | 2 +- .../services/peerstream/replication.go | 2 +- .../services/peerstream/server.go | 6 +- .../services/peerstream/server_test.go | 2 +- .../services/peerstream/stream_resources.go | 2 +- .../services/peerstream/stream_test.go | 4 +- .../services/peerstream/stream_tracker.go | 2 +- .../peerstream/stream_tracker_test.go | 2 +- .../peerstream/subscription_blocking.go | 2 +- .../peerstream/subscription_manager.go | 6 +- .../peerstream/subscription_manager_test.go | 2 +- .../services/peerstream/subscription_state.go | 2 +- .../peerstream/subscription_state_test.go | 2 +- .../services/peerstream/subscription_view.go | 2 +- .../peerstream/subscription_view_test.go | 2 +- .../services/peerstream/testing.go | 2 +- .../grpc-external/services/resource/delete.go | 155 +- .../services/resource/delete_ce.go | 15 - .../services/resource/delete_test.go | 526 +- agent/grpc-external/services/resource/list.go | 76 +- .../services/resource/list_by_owner.go | 74 +- .../services/resource/list_by_owner_test.go | 301 +- .../services/resource/list_test.go | 224 +- .../services/resource/mock_Registry.go | 16 - .../services/resource/mock_TenancyBridge.go | 121 - .../services/resource/mutate_and_validate.go | 145 - .../resource/mutate_and_validate_test.go | 212 - agent/grpc-external/services/resource/read.go | 88 +- .../services/resource/read_test.go | 386 +- .../grpc-external/services/resource/server.go | 235 +- .../services/resource/server_ce.go | 63 - 
.../services/resource/server_ce_test.go | 16 - .../services/resource/server_test.go | 205 +- .../services/resource/testing/builder.go | 193 - .../services/resource/testing/builder_ce.go | 38 - .../services/resource/testing/testing.go | 60 +- .../services/resource/testing/testing_ce.go | 63 - .../grpc-external/services/resource/watch.go | 114 +- .../services/resource/watch_test.go | 266 +- .../grpc-external/services/resource/write.go | 230 +- .../resource/write_mav_common_test.go | 314 - .../services/resource/write_status.go | 94 +- .../services/resource/write_status_test.go | 447 +- .../services/resource/write_test.go | 660 +- .../services/serverdiscovery/server.go | 6 +- .../services/serverdiscovery/server_test.go | 2 +- .../services/serverdiscovery/watch_servers.go | 2 +- .../serverdiscovery/watch_servers_test.go | 2 +- agent/grpc-external/stats_test.go | 4 +- agent/grpc-external/testutils/acl.go | 33 +- agent/grpc-external/testutils/fsm.go | 44 +- .../testutils/mock_server_transport_stream.go | 27 - agent/grpc-external/testutils/server.go | 4 +- agent/grpc-external/utils.go | 8 +- agent/grpc-internal/balancer/balancer.go | 2 +- agent/grpc-internal/balancer/balancer_test.go | 2 +- agent/grpc-internal/balancer/registry.go | 2 +- agent/grpc-internal/client.go | 2 +- agent/grpc-internal/client_test.go | 2 +- agent/grpc-internal/handler.go | 13 +- agent/grpc-internal/handler_test.go | 2 +- agent/grpc-internal/listener.go | 2 +- agent/grpc-internal/pipe.go | 2 +- agent/grpc-internal/pipe_test.go | 2 +- agent/grpc-internal/resolver/registry.go | 2 +- agent/grpc-internal/resolver/resolver.go | 2 +- agent/grpc-internal/server_test.go | 11 +- .../services/subscribe/logger.go | 2 +- .../services/subscribe/subscribe.go | 2 +- .../services/subscribe/subscribe_test.go | 62 +- agent/grpc-internal/stats_test.go | 8 +- agent/grpc-internal/tracker.go | 2 +- agent/grpc-middleware/auth_interceptor.go | 2 +- .../grpc-middleware/auth_interceptor_test.go | 2 +- agent/grpc-middleware/handshake.go | 2 +- agent/grpc-middleware/handshake_test.go | 2 +- agent/grpc-middleware/rate.go | 2 +- .../rate_limit_mappings.gen.go | 64 +- agent/grpc-middleware/rate_test.go | 2 +- agent/grpc-middleware/recovery.go | 2 +- agent/grpc-middleware/stats.go | 9 +- agent/grpc-middleware/testutil/fake_sink.go | 2 +- .../testutil/testservice/buf.gen.yaml | 2 +- .../testutil/testservice/fake_service.go | 2 +- .../testutil/testservice/simple.pb.go | 2 +- .../testutil/testservice/simple.proto | 2 +- agent/hcp/bootstrap/bootstrap.go | 265 +- agent/hcp/bootstrap/bootstrap_test.go | 459 +- agent/hcp/bootstrap/config-loader/loader.go | 179 - .../bootstrap/config-loader/loader_test.go | 391 - agent/hcp/bootstrap/constants/constants.go | 9 - agent/hcp/bootstrap/testing.go | 2 +- agent/hcp/client/client.go | 84 +- agent/hcp/client/client_test.go | 3 - agent/hcp/client/errors.go | 11 - agent/hcp/client/http_client.go | 57 - agent/hcp/client/http_client_test.go | 30 - agent/hcp/client/metrics_client.go | 103 +- agent/hcp/client/metrics_client_test.go | 83 +- agent/hcp/client/mock_Client.go | 148 +- .../{config => client}/mock_CloudConfig.go | 2 +- agent/hcp/client/telemetry_config.go | 19 +- agent/hcp/client/telemetry_config_test.go | 47 +- agent/hcp/config/config.go | 66 +- agent/hcp/config/config_test.go | 82 - agent/hcp/deps.go | 86 +- agent/hcp/deps_test.go | 84 +- agent/hcp/discover/discover.go | 2 +- agent/hcp/link_watch.go | 68 - agent/hcp/link_watch_test.go | 101 - agent/hcp/manager.go | 287 +- agent/hcp/manager_lifecycle.go | 107 - 
agent/hcp/manager_lifecycle_test.go | 236 - agent/hcp/manager_test.go | 375 +- agent/hcp/mock_Manager.go | 209 - agent/hcp/mock_TelemetryProvider.go | 115 - agent/hcp/scada/capabilities.go | 2 +- agent/hcp/scada/mock_Provider.go | 132 +- agent/hcp/scada/scada.go | 59 +- agent/hcp/scada/scada_test.go | 52 - agent/hcp/telemetry/otel_exporter.go | 6 - agent/hcp/telemetry/otel_exporter_test.go | 17 +- agent/hcp/telemetry/otel_sink.go | 26 +- agent/hcp/telemetry/otel_sink_test.go | 34 +- agent/hcp/telemetry/otlp_transform.go | 10 +- agent/hcp/telemetry/otlp_transform_test.go | 6 +- agent/hcp/telemetry_provider.go | 302 +- agent/hcp/telemetry_provider_test.go | 441 +- agent/hcp/testing.go | 2 +- agent/hcp/testserver/main.go | 2 +- agent/health_endpoint.go | 5 +- agent/health_endpoint_test.go | 53 +- agent/http.go | 98 +- agent/http_ce.go | 3 +- agent/http_ce_test.go | 2 +- agent/http_decode_test.go | 2 +- agent/http_register.go | 6 +- agent/http_test.go | 4 +- agent/intentions_endpoint.go | 2 +- agent/intentions_endpoint_ce_test.go | 3 +- agent/intentions_endpoint_test.go | 2 +- agent/keyring.go | 2 +- agent/keyring_test.go | 2 +- agent/kvs_endpoint.go | 2 +- agent/kvs_endpoint_test.go | 2 +- agent/leafcert/cached_roots.go | 13 +- agent/leafcert/generate.go | 14 +- agent/leafcert/leafcert.go | 18 +- agent/leafcert/leafcert_test.go | 128 +- agent/leafcert/roots.go | 4 +- ...eafcert_test_helpers.go => signer_test.go} | 172 +- agent/leafcert/structs.go | 25 +- agent/leafcert/watch.go | 12 +- agent/local/state.go | 2 +- agent/local/state_internal_test.go | 2 +- agent/local/state_test.go | 2 +- agent/local/testing.go | 2 +- agent/log-drop/log-drop.go | 2 +- agent/log-drop/log-drop_test.go | 2 +- agent/metadata/build.go | 2 +- agent/metadata/build_test.go | 2 +- agent/metadata/server.go | 2 +- agent/metadata/server_internal_test.go | 2 +- agent/metadata/server_test.go | 2 +- agent/metrics.go | 2 +- agent/metrics/testing.go | 2 +- agent/metrics_test.go | 10 +- agent/mock/notify.go | 2 +- agent/nodeid.go | 2 +- agent/nodeid_test.go | 2 +- agent/notify.go | 2 +- agent/notify_test.go | 2 +- agent/operator_endpoint.go | 2 +- agent/operator_endpoint_ce.go | 3 +- agent/operator_endpoint_ce_test.go | 3 +- agent/operator_endpoint_test.go | 2 +- agent/peering_endpoint.go | 2 +- agent/peering_endpoint_ce_test.go | 3 +- agent/peering_endpoint_test.go | 2 +- agent/pool/conn.go | 2 +- agent/pool/peek.go | 2 +- agent/pool/peek_test.go | 2 +- agent/pool/pool.go | 2 +- agent/prepared_query_endpoint.go | 5 +- agent/prepared_query_endpoint_test.go | 2 +- agent/proxycfg-glue/config_entry.go | 2 +- agent/proxycfg-glue/discovery_chain.go | 2 +- agent/proxycfg-glue/discovery_chain_test.go | 2 +- .../proxycfg-glue/exported_peered_services.go | 2 +- .../exported_peered_services_test.go | 2 +- .../federation_state_list_mesh_gateways.go | 2 +- ...ederation_state_list_mesh_gateways_test.go | 2 +- agent/proxycfg-glue/gateway_services.go | 2 +- agent/proxycfg-glue/gateway_services_test.go | 2 +- agent/proxycfg-glue/glue.go | 4 +- agent/proxycfg-glue/health.go | 2 +- agent/proxycfg-glue/health_blocking.go | 2 +- agent/proxycfg-glue/health_test.go | 2 +- agent/proxycfg-glue/helpers_test.go | 2 +- agent/proxycfg-glue/intention_upstreams.go | 21 +- .../proxycfg-glue/intention_upstreams_test.go | 45 +- agent/proxycfg-glue/intentions.go | 2 +- agent/proxycfg-glue/intentions_ce.go | 3 +- agent/proxycfg-glue/intentions_test.go | 2 +- agent/proxycfg-glue/internal_service_dump.go | 2 +- .../internal_service_dump_test.go | 2 +- 
agent/proxycfg-glue/leafcerts.go | 2 +- agent/proxycfg-glue/peered_upstreams.go | 2 +- agent/proxycfg-glue/peered_upstreams_test.go | 2 +- agent/proxycfg-glue/peering_list.go | 2 +- agent/proxycfg-glue/peering_list_test.go | 2 +- .../proxycfg-glue/resolved_service_config.go | 2 +- .../resolved_service_config_test.go | 2 +- agent/proxycfg-glue/service_http_checks.go | 2 +- .../proxycfg-glue/service_http_checks_test.go | 2 +- agent/proxycfg-glue/service_list.go | 2 +- agent/proxycfg-glue/service_list_test.go | 2 +- agent/proxycfg-glue/trust_bundle.go | 2 +- agent/proxycfg-glue/trust_bundle_test.go | 2 +- .../proxycfg-sources/catalog/config_source.go | 15 +- .../catalog/config_source_oss.go | 15 - .../catalog/config_source_test.go | 45 +- .../catalog/mock_ConfigManager.go | 30 +- .../catalog/mock_SessionLimiter.go | 21 +- .../proxycfg-sources/catalog/mock_Watcher.go | 40 +- agent/proxycfg-sources/local/config_source.go | 9 +- agent/proxycfg-sources/local/local.go | 2 +- .../local/mock_ConfigManager.go | 30 +- agent/proxycfg-sources/local/sync.go | 17 +- agent/proxycfg-sources/local/sync_test.go | 17 +- agent/proxycfg/api_gateway.go | 23 +- agent/proxycfg/api_gateway_ce.go | 16 - agent/proxycfg/config_snapshot_glue.go | 69 - agent/proxycfg/config_snapshot_glue_test.go | 315 - agent/proxycfg/connect_proxy.go | 2 +- agent/proxycfg/data_sources.go | 2 +- agent/proxycfg/data_sources_ce.go | 3 +- agent/proxycfg/deep-copy.sh | 2 +- agent/proxycfg/ingress_gateway.go | 2 +- agent/proxycfg/internal/watch/watchmap.go | 2 +- .../proxycfg/internal/watch/watchmap_test.go | 2 +- agent/proxycfg/manager.go | 54 +- agent/proxycfg/manager_test.go | 11 +- agent/proxycfg/mesh_gateway.go | 3 +- agent/proxycfg/mesh_gateway_ce.go | 3 +- agent/proxycfg/naming.go | 2 +- agent/proxycfg/naming_ce.go | 3 +- agent/proxycfg/naming_test.go | 2 +- agent/proxycfg/proxycfg.deepcopy.go | 12 +- agent/proxycfg/proxycfg.go | 4 +- agent/proxycfg/snapshot.go | 5 +- agent/proxycfg/snapshot_test.go | 2 +- agent/proxycfg/state.go | 5 +- agent/proxycfg/state_ce_test.go | 3 +- agent/proxycfg/state_test.go | 2 +- agent/proxycfg/terminating_gateway.go | 2 +- agent/proxycfg/testing.go | 62 +- agent/proxycfg/testing_api_gateway.go | 5 +- agent/proxycfg/testing_ce.go | 3 +- agent/proxycfg/testing_connect_proxy.go | 42 +- agent/proxycfg/testing_ingress_gateway.go | 215 +- agent/proxycfg/testing_mesh_gateway.go | 170 +- agent/proxycfg/testing_peering.go | 134 +- agent/proxycfg/testing_terminating_gateway.go | 5 +- agent/proxycfg/testing_tproxy.go | 2 +- agent/proxycfg/testing_upstreams.go | 175 +- agent/proxycfg/testing_upstreams_ce.go | 3 +- agent/proxycfg/upstreams.go | 2 +- agent/proxycfg_test.go | 14 +- agent/reload.go | 2 +- agent/remote_exec.go | 2 +- agent/remote_exec_test.go | 11 +- agent/retry_join.go | 2 +- agent/retry_join_test.go | 2 +- agent/router/grpc.go | 2 +- agent/router/manager.go | 2 +- agent/router/manager_internal_test.go | 2 +- agent/router/manager_test.go | 2 +- agent/router/router.go | 2 +- agent/router/router_test.go | 2 +- agent/router/serf_adapter.go | 2 +- agent/router/serf_flooder.go | 2 +- agent/routine-leak-checker/leak_test.go | 2 +- agent/rpc/middleware/interceptors.go | 24 +- agent/rpc/middleware/interceptors_test.go | 2 +- agent/rpc/middleware/rate_limit_mappings.go | 2 +- agent/rpc/middleware/recovery.go | 2 +- agent/rpc/operator/service.go | 6 +- agent/rpc/operator/service_test.go | 2 +- agent/rpc/peering/service.go | 6 +- agent/rpc/peering/service_ce_test.go | 3 +- agent/rpc/peering/service_test.go | 7 +- 
agent/rpc/peering/testing.go | 2 +- agent/rpc/peering/testutil_ce_test.go | 3 +- agent/rpc/peering/validate.go | 2 +- agent/rpc/peering/validate_test.go | 2 +- agent/rpcclient/common.go | 2 +- agent/rpcclient/configentry/configentry.go | 2 +- .../rpcclient/configentry/configentry_test.go | 2 +- agent/rpcclient/configentry/view.go | 2 +- agent/rpcclient/configentry/view_test.go | 2 +- agent/rpcclient/health/health.go | 2 +- agent/rpcclient/health/health_test.go | 2 +- agent/rpcclient/health/streaming_test.go | 2 +- agent/rpcclient/health/view.go | 2 +- agent/rpcclient/health/view_test.go | 2 +- agent/service_checks_test.go | 2 +- agent/service_manager.go | 8 +- agent/service_manager_test.go | 2 +- agent/session_endpoint.go | 2 +- agent/session_endpoint_test.go | 27 +- agent/setup.go | 37 +- agent/setup_ce.go | 3 +- agent/sidecar_service.go | 2 +- agent/sidecar_service_test.go | 2 +- agent/signal_unix.go | 3 +- agent/signal_windows.go | 3 +- agent/snapshot_endpoint.go | 2 +- agent/snapshot_endpoint_test.go | 2 +- agent/status_endpoint.go | 2 +- agent/status_endpoint_test.go | 2 +- agent/streaming_test.go | 2 +- agent/structs/acl.go | 150 +- agent/structs/acl_cache.go | 2 +- agent/structs/acl_cache_test.go | 2 +- agent/structs/acl_ce.go | 3 +- agent/structs/acl_templated_policy.go | 338 - agent/structs/acl_templated_policy_ce.go | 53 - agent/structs/acl_templated_policy_ce_test.go | 137 - agent/structs/acl_templated_policy_test.go | 103 - agent/structs/acl_test.go | 2 +- agent/structs/aclfilter/filter.go | 2 +- agent/structs/aclfilter/filter_test.go | 2 +- .../policies/ce/api-gateway.hcl | 10 - .../acltemplatedpolicy/policies/ce/dns.hcl | 10 - .../acltemplatedpolicy/policies/ce/node.hcl | 7 - .../policies/ce/nomad-client.hcl | 12 - .../policies/ce/nomad-server.hcl | 11 - .../policies/ce/service.hcl | 13 - .../policies/ce/workload-identity.hcl | 3 - .../schemas/api-gateway.json | 13 - .../acltemplatedpolicy/schemas/node.json | 13 - .../acltemplatedpolicy/schemas/service.json | 13 - .../schemas/workload-identity.json | 13 - agent/structs/auto_encrypt.go | 2 +- agent/structs/autopilot.go | 2 +- agent/structs/autopilot_ce.go | 3 +- agent/structs/catalog.go | 2 +- agent/structs/catalog_ce.go | 3 +- agent/structs/check_definition.go | 2 +- agent/structs/check_definition_test.go | 2 +- agent/structs/check_type.go | 2 +- agent/structs/config_entry.go | 70 +- agent/structs/config_entry_apigw_jwt_ce.go | 12 - agent/structs/config_entry_ce.go | 27 +- agent/structs/config_entry_ce_test.go | 60 +- agent/structs/config_entry_discoverychain.go | 10 +- .../structs/config_entry_discoverychain_ce.go | 3 +- .../config_entry_discoverychain_ce_test.go | 3 +- .../config_entry_discoverychain_test.go | 16 +- agent/structs/config_entry_exports.go | 2 +- agent/structs/config_entry_exports_ce.go | 3 +- agent/structs/config_entry_exports_ce_test.go | 3 +- agent/structs/config_entry_exports_test.go | 2 +- agent/structs/config_entry_gateways.go | 13 +- agent/structs/config_entry_gateways_test.go | 2 +- .../config_entry_inline_certificate.go | 3 +- .../config_entry_inline_certificate_test.go | 2 +- agent/structs/config_entry_intentions.go | 2 +- agent/structs/config_entry_intentions_ce.go | 3 +- .../config_entry_intentions_ce_test.go | 3 +- agent/structs/config_entry_intentions_test.go | 2 +- agent/structs/config_entry_jwt_provider.go | 2 +- agent/structs/config_entry_jwt_provider_ce.go | 3 +- .../structs/config_entry_jwt_provider_test.go | 2 +- agent/structs/config_entry_mesh.go | 2 +- 
agent/structs/config_entry_mesh_ce.go | 3 +- agent/structs/config_entry_mesh_test.go | 2 +- agent/structs/config_entry_routes.go | 67 +- agent/structs/config_entry_routes_test.go | 475 +- agent/structs/config_entry_sameness_group.go | 2 +- .../structs/config_entry_sameness_group_ce.go | 3 +- agent/structs/config_entry_status.go | 2 +- agent/structs/config_entry_test.go | 306 +- agent/structs/connect.go | 2 +- agent/structs/connect_ca.go | 13 +- agent/structs/connect_ca_test.go | 2 +- agent/structs/connect_ce.go | 3 +- agent/structs/connect_proxy_config.go | 45 +- agent/structs/connect_proxy_config_ce.go | 3 +- agent/structs/connect_proxy_config_test.go | 2 +- agent/structs/deep-copy.sh | 2 +- agent/structs/discovery_chain.go | 2 +- agent/structs/discovery_chain_ce.go | 3 +- agent/structs/envoy_extension.go | 2 +- agent/structs/errors.go | 16 +- agent/structs/federation_state.go | 2 +- agent/structs/identity.go | 2 +- agent/structs/intention.go | 7 +- agent/structs/intention_ce.go | 3 +- agent/structs/intention_test.go | 2 +- agent/structs/operator.go | 2 +- agent/structs/peering.go | 2 +- agent/structs/prepared_query.go | 4 +- agent/structs/prepared_query_test.go | 2 +- agent/structs/protobuf_compat.go | 2 +- agent/structs/service_definition.go | 2 +- agent/structs/service_definition_test.go | 2 +- agent/structs/snapshot.go | 2 +- agent/structs/structs.deepcopy.go | 108 - agent/structs/structs.deepcopy_ce.go | 16 - agent/structs/structs.go | 34 +- agent/structs/structs_ce.go | 13 +- agent/structs/structs_ce_test.go | 3 +- agent/structs/structs_ext_test.go | 2 +- agent/structs/structs_filtering_test.go | 2 +- agent/structs/structs_test.go | 48 +- agent/structs/system_metadata.go | 2 +- agent/structs/testing.go | 2 +- agent/structs/testing_catalog.go | 11 +- agent/structs/testing_connect_proxy_config.go | 2 +- agent/structs/testing_intention.go | 2 +- agent/structs/testing_service_definition.go | 2 +- agent/structs/txn.go | 2 +- agent/submatview/handler.go | 2 +- agent/submatview/local_materializer.go | 2 +- agent/submatview/local_materializer_test.go | 2 +- agent/submatview/materializer.go | 2 +- agent/submatview/rpc_materializer.go | 2 +- agent/submatview/store.go | 2 +- agent/submatview/store_integration_test.go | 2 +- agent/submatview/store_test.go | 2 +- agent/submatview/streaming_test.go | 2 +- agent/systemd/notify.go | 2 +- agent/testagent.go | 70 +- agent/testagent_test.go | 2 +- agent/token/persistence.go | 18 +- agent/token/persistence_test.go | 184 +- agent/token/store.go | 41 +- agent/token/store_ce.go | 3 +- agent/token/store_test.go | 52 +- agent/translate_addr.go | 33 +- agent/txn_endpoint.go | 2 +- agent/txn_endpoint_test.go | 2 +- agent/ui_endpoint.go | 3 +- agent/ui_endpoint_ce_test.go | 3 +- agent/ui_endpoint_test.go | 24 +- agent/uiserver/buf_index_fs.go | 2 +- agent/uiserver/buffered_file.go | 2 +- agent/uiserver/dist/index.html | 2 +- agent/uiserver/redirect_fs.go | 2 +- agent/uiserver/ui_template_data.go | 11 +- agent/uiserver/uiserver.go | 2 +- agent/uiserver/uiserver_test.go | 50 +- agent/user_event.go | 2 +- agent/user_event_test.go | 2 +- agent/util.go | 2 +- agent/util_test.go | 2 +- agent/watch_handler.go | 2 +- agent/watch_handler_test.go | 2 +- agent/xds/accesslogs/accesslogs.go | 33 +- agent/xds/clusters.go | 117 +- agent/xds/clusters_test.go | 905 +- agent/xds/config/config.go | 8 +- agent/xds/config/config_test.go | 4 +- agent/xds/configfetcher/config_fetcher.go | 10 - agent/xds/delta.go | 230 +- agent/xds/delta_envoy_extender_ce_test.go | 53 +- 
agent/xds/delta_envoy_extender_test.go | 2 +- agent/xds/delta_test.go | 26 +- agent/xds/endpoints.go | 117 +- agent/xds/endpoints_test.go | 386 +- agent/xds/extensionruntime/runtime_config.go | 2 +- .../runtime_config_ce_test.go | 3 +- agent/xds/failover_policy.go | 8 +- agent/xds/failover_policy_ce.go | 3 +- agent/xds/golden_test.go | 21 +- agent/xds/gw_per_route_filters_ce.go | 23 - agent/xds/jwt_authn.go | 2 +- agent/xds/jwt_authn_ce.go | 24 - agent/xds/jwt_authn_test.go | 2 +- agent/xds/listeners.go | 29 +- agent/xds/listeners_apigateway.go | 139 +- agent/xds/listeners_ingress.go | 5 +- agent/xds/listeners_test.go | 1268 ++- agent/xds/locality_policy.go | 23 - agent/xds/locality_policy_ce.go | 15 - agent/xds/naming.go | 19 + agent/xds/naming/naming.go | 29 - agent/xds/net_fallback.go | 11 + agent/xds/{platform => }/net_linux.go | 7 +- agent/xds/platform/net_fallback.go | 10 - agent/xds/protocol_trace.go | 17 +- agent/xds/proxystateconverter/clusters.go | 1259 --- agent/xds/proxystateconverter/converter.go | 135 - agent/xds/proxystateconverter/endpoints.go | 671 -- .../proxystateconverter/failover_policy.go | 142 - .../proxystateconverter/failover_policy_ce.go | 14 - agent/xds/proxystateconverter/listeners.go | 1675 --- .../proxystateconverter/locality_policy.go | 21 - .../proxystateconverter/locality_policy_ce.go | 14 - agent/xds/proxystateconverter/routes.go | 805 -- agent/xds/rbac.go | 79 +- agent/xds/rbac_test.go | 526 +- agent/xds/resources.go | 7 +- agent/xds/resources_ce_test.go | 7 +- agent/xds/resources_test.go | 3211 +----- agent/xds/{response => }/response.go | 19 +- agent/xds/routes.go | 167 +- agent/xds/routes_test.go | 303 +- agent/xds/secrets.go | 2 +- agent/xds/server.go | 65 +- agent/xds/server_ce.go | 3 +- agent/xds/testcommon/testcommon.go | 2 +- ...uthz-http-local-grpc-service.latest.golden | 130 +- ...uthz-http-local-http-service.latest.golden | 122 +- ...z-http-upstream-grpc-service.latest.golden | 96 +- ...z-http-upstream-http-service.latest.golden | 96 +- ...authz-tcp-local-grpc-service.latest.golden | 130 +- ...hz-tcp-upstream-grpc-service.latest.golden | 96 +- ...lambda-and-lua-connect-proxy.latest.golden | 58 +- ...-connect-proxy-opposite-meta.latest.golden | 58 +- .../lambda-connect-proxy-tproxy.latest.golden | 124 +- ...terminating-gateway-upstream.latest.golden | 68 +- .../lambda-connect-proxy.latest.golden | 58 +- ...teway-with-service-resolvers.latest.golden | 296 +- .../lambda-terminating-gateway.latest.golden | 196 +- ...terminating-gateway-upstream.latest.golden | 68 +- ...a-inbound-applies-to-inbound.latest.golden | 68 +- ...snt-apply-to-local-upstreams.latest.golden | 68 +- ...es-to-local-upstreams-tproxy.latest.golden | 180 +- ...d-applies-to-local-upstreams.latest.golden | 68 +- ...ound-doesnt-apply-to-inbound.latest.golden | 68 +- ...-consul-constraint-violation.latest.golden | 68 +- ...h-envoy-constraint-violation.latest.golden | 68 +- .../otel-access-logging-http.latest.golden | 136 - ...opertyoverride-add-keepalive.latest.golden | 64 +- ...d-outlier-detection-multiple.latest.golden | 76 +- ...erride-add-outlier-detection.latest.golden | 68 +- ...de-add-round-robin-lb-config.latest.golden | 68 +- ...-load-assignment-inbound-add.latest.golden | 68 +- ...load-assignment-outbound-add.latest.golden | 68 +- ...und-doesnt-apply-to-outbound.latest.golden | 68 +- ...verride-listener-inbound-add.latest.golden | 68 +- ...erride-listener-outbound-add.latest.golden | 68 +- ...ound-doesnt-apply-to-inbound.latest.golden | 68 +- 
...ic-upstream-service-failover.latest.golden | 100 +- ...ic-upstream-service-splitter.latest.golden | 98 +- ...ide-remove-outlier-detection.latest.golden | 68 +- .../wasm-http-local-file.latest.golden | 68 +- .../wasm-http-remote-file.latest.golden | 68 +- ...wasm-tcp-local-file-outbound.latest.golden | 68 +- .../wasm-tcp-local-file.latest.golden | 68 +- ...asm-tcp-remote-file-outbound.latest.golden | 68 +- .../wasm-tcp-remote-file.latest.golden | 68 +- ...uthz-http-local-grpc-service.latest.golden | 6 +- ...uthz-http-local-http-service.latest.golden | 6 +- ...z-http-upstream-grpc-service.latest.golden | 6 +- ...z-http-upstream-http-service.latest.golden | 6 +- ...authz-tcp-local-grpc-service.latest.golden | 6 +- ...hz-tcp-upstream-grpc-service.latest.golden | 6 +- ...lambda-and-lua-connect-proxy.latest.golden | 80 +- ...-connect-proxy-opposite-meta.latest.golden | 80 +- .../lambda-connect-proxy-tproxy.latest.golden | 108 +- ...terminating-gateway-upstream.latest.golden | 80 +- .../lambda-connect-proxy.latest.golden | 80 +- ...teway-with-service-resolvers.latest.golden | 116 +- .../lambda-terminating-gateway.latest.golden | 44 +- ...terminating-gateway-upstream.latest.golden | 80 +- ...a-inbound-applies-to-inbound.latest.golden | 80 +- ...snt-apply-to-local-upstreams.latest.golden | 80 +- ...es-to-local-upstreams-tproxy.latest.golden | 4 +- ...d-applies-to-local-upstreams.latest.golden | 80 +- ...ound-doesnt-apply-to-inbound.latest.golden | 80 +- ...-consul-constraint-violation.latest.golden | 80 +- ...h-envoy-constraint-violation.latest.golden | 80 +- .../otel-access-logging-http.latest.golden | 75 - ...opertyoverride-add-keepalive.latest.golden | 4 +- ...d-outlier-detection-multiple.latest.golden | 4 +- ...erride-add-outlier-detection.latest.golden | 4 +- ...de-add-round-robin-lb-config.latest.golden | 4 +- ...-load-assignment-inbound-add.latest.golden | 4 +- ...load-assignment-outbound-add.latest.golden | 4 +- ...und-doesnt-apply-to-outbound.latest.golden | 80 +- ...verride-listener-inbound-add.latest.golden | 80 +- ...erride-listener-outbound-add.latest.golden | 80 +- ...ound-doesnt-apply-to-inbound.latest.golden | 80 +- ...ic-upstream-service-failover.latest.golden | 124 +- ...ic-upstream-service-splitter.latest.golden | 124 +- ...ide-remove-outlier-detection.latest.golden | 4 +- .../wasm-http-local-file.latest.golden | 4 +- .../wasm-http-remote-file.latest.golden | 4 +- ...wasm-tcp-local-file-outbound.latest.golden | 4 +- .../wasm-tcp-local-file.latest.golden | 4 +- ...asm-tcp-remote-file-outbound.latest.golden | 4 +- .../wasm-tcp-remote-file.latest.golden | 4 +- ...uthz-http-local-grpc-service.latest.golden | 101 +- ...uthz-http-local-http-service.latest.golden | 105 +- ...z-http-upstream-grpc-service.latest.golden | 127 +- ...z-http-upstream-http-service.latest.golden | 173 +- ...authz-tcp-local-grpc-service.latest.golden | 30 +- ...hz-tcp-upstream-grpc-service.latest.golden | 36 +- ...lambda-and-lua-connect-proxy.latest.golden | 131 +- ...-connect-proxy-opposite-meta.latest.golden | 60 +- .../lambda-connect-proxy-tproxy.latest.golden | 90 +- ...terminating-gateway-upstream.latest.golden | 40 +- .../lambda-connect-proxy.latest.golden | 60 +- ...teway-with-service-resolvers.latest.golden | 118 +- .../lambda-terminating-gateway.latest.golden | 58 +- ...terminating-gateway-upstream.latest.golden | 40 +- ...a-inbound-applies-to-inbound.latest.golden | 95 +- ...snt-apply-to-local-upstreams.latest.golden | 111 +- ...es-to-local-upstreams-tproxy.latest.golden | 129 +- 
...d-applies-to-local-upstreams.latest.golden | 129 +- ...ound-doesnt-apply-to-inbound.latest.golden | 95 +- ...-consul-constraint-violation.latest.golden | 111 +- ...h-envoy-constraint-violation.latest.golden | 111 +- .../otel-access-logging-http.latest.golden | 284 - ...opertyoverride-add-keepalive.latest.golden | 95 +- ...d-outlier-detection-multiple.latest.golden | 95 +- ...erride-add-outlier-detection.latest.golden | 95 +- ...de-add-round-robin-lb-config.latest.golden | 95 +- ...-load-assignment-inbound-add.latest.golden | 95 +- ...load-assignment-outbound-add.latest.golden | 95 +- ...und-doesnt-apply-to-outbound.latest.golden | 101 +- ...verride-listener-inbound-add.latest.golden | 97 +- ...erride-listener-outbound-add.latest.golden | 99 +- ...ound-doesnt-apply-to-inbound.latest.golden | 101 +- ...ic-upstream-service-failover.latest.golden | 97 +- ...ic-upstream-service-splitter.latest.golden | 109 +- ...ch-specific-upstream-service.latest.golden | 5 + ...ide-remove-outlier-detection.latest.golden | 95 +- ...ive-mtls-and-envoy-extension.latest.golden | 381 +- .../wasm-http-local-file.latest.golden | 111 +- .../wasm-http-remote-file.latest.golden | 115 +- ...wasm-tcp-local-file-outbound.latest.golden | 56 +- .../wasm-tcp-local-file.latest.golden | 40 +- ...asm-tcp-remote-file-outbound.latest.golden | 64 +- .../wasm-tcp-remote-file.latest.golden | 44 +- ...uthz-http-local-grpc-service.latest.golden | 6 +- ...uthz-http-local-http-service.latest.golden | 6 +- ...z-http-upstream-grpc-service.latest.golden | 6 +- ...z-http-upstream-http-service.latest.golden | 6 +- ...authz-tcp-local-grpc-service.latest.golden | 6 +- ...hz-tcp-upstream-grpc-service.latest.golden | 6 +- ...lambda-and-lua-connect-proxy.latest.golden | 6 +- ...-connect-proxy-opposite-meta.latest.golden | 6 +- .../lambda-connect-proxy-tproxy.latest.golden | 6 +- ...terminating-gateway-upstream.latest.golden | 6 +- .../routes/lambda-connect-proxy.latest.golden | 6 +- ...teway-with-service-resolvers.latest.golden | 80 +- .../lambda-terminating-gateway.latest.golden | 32 +- ...terminating-gateway-upstream.latest.golden | 6 +- ...a-inbound-applies-to-inbound.latest.golden | 6 +- ...snt-apply-to-local-upstreams.latest.golden | 6 +- ...es-to-local-upstreams-tproxy.latest.golden | 20 +- ...d-applies-to-local-upstreams.latest.golden | 6 +- ...ound-doesnt-apply-to-inbound.latest.golden | 6 +- ...-consul-constraint-violation.latest.golden | 6 +- ...h-envoy-constraint-violation.latest.golden | 6 +- .../otel-access-logging-http.latest.golden | 5 - ...opertyoverride-add-keepalive.latest.golden | 4 +- ...d-outlier-detection-multiple.latest.golden | 4 +- ...erride-add-outlier-detection.latest.golden | 4 +- ...de-add-round-robin-lb-config.latest.golden | 4 +- ...-load-assignment-inbound-add.latest.golden | 4 +- ...load-assignment-outbound-add.latest.golden | 4 +- ...und-doesnt-apply-to-outbound.latest.golden | 6 +- ...verride-listener-inbound-add.latest.golden | 6 +- ...erride-listener-outbound-add.latest.golden | 6 +- ...ound-doesnt-apply-to-inbound.latest.golden | 6 +- ...ic-upstream-service-failover.latest.golden | 33 +- ...ic-upstream-service-splitter.latest.golden | 46 +- ...ide-remove-outlier-detection.latest.golden | 4 +- .../routes/wasm-http-local-file.latest.golden | 4 +- .../wasm-http-remote-file.latest.golden | 4 +- ...wasm-tcp-local-file-outbound.latest.golden | 4 +- .../routes/wasm-tcp-local-file.latest.golden | 4 +- ...asm-tcp-remote-file-outbound.latest.golden | 4 +- .../routes/wasm-tcp-remote-file.latest.golden | 4 +- 
.../access-logs-defaults.latest.golden | 136 - .../access-logs-json-file.latest.golden | 136 - ...t-stderr-disablelistenerlogs.latest.golden | 136 - ...ttp-listener-with-http-route.latest.golden | 58 - .../api-gateway-http-listener.latest.golden | 5 - ...api-gateway-nil-config-entry.latest.golden | 5 - ...ener-with-tcp-and-http-route.latest.golden | 109 - ...-tcp-listener-with-tcp-route.latest.golden | 58 - .../api-gateway-tcp-listener.latest.golden | 5 - ...oute-and-inline-certificate.latest.golden} | 34 +- ...eway-with-multiple-hostnames.latest.golden | 109 - ...multiple-inline-certificates.latest.golden | 58 - ...nd-inline-certificate.envoy-1-21-x.golden} | 43 +- ...route-and-inline-certificate.latest.golden | 34 +- .../clusters/api-gateway.latest.golden | 5 - ...nect-proxy-exported-to-peers.latest.golden | 10 +- ...connect-proxy-lb-in-resolver.latest.golden | 102 +- ...nnect-proxy-resolver-with-lb.latest.golden | 141 - ...t-proxy-route-to-lb-resolver.latest.golden | 192 - ...ct-proxy-splitter-overweight.latest.golden | 238 - ...nect-proxy-upstream-defaults.latest.golden | 136 - ...and-failover-to-cluster-peer.latest.golden | 102 +- ...roxy-with-chain-and-failover.latest.golden | 102 +- ...oxy-with-chain-and-overrides.latest.golden | 82 +- ...and-redirect-to-cluster-peer.latest.golden | 68 +- ...-proxy-with-chain-and-router.latest.golden | 1615 --- ...roxy-with-chain-and-splitter.latest.golden | 289 - ...roxy-with-chain-external-sni.latest.golden | 68 +- ...nnect-proxy-with-chain-http2.latest.golden | 144 - .../connect-proxy-with-chain.latest.golden | 68 +- ...ult-chain-and-custom-cluster.latest.golden | 142 - ...onnect-proxy-with-grpc-chain.latest.golden | 144 - ...nnect-proxy-with-grpc-router.latest.golden | 203 - ...onnect-proxy-with-http-chain.latest.golden | 136 - ...nnect-proxy-with-http2-chain.latest.golden | 144 - ...-jwt-config-entry-with-local.latest.golden | 68 +- ...onfig-entry-with-remote-jwks.latest.golden | 74 +- ...d-upstreams-escape-overrides.latest.golden | 141 - ...-with-peered-upstreams-http2.latest.golden | 169 - ...-proxy-with-peered-upstreams.latest.golden | 72 +- ...ough-local-gateway-triggered.latest.golden | 132 +- ...ilover-through-local-gateway.latest.golden | 132 +- ...ugh-remote-gateway-triggered.latest.golden | 132 +- ...lover-through-remote-gateway.latest.golden | 132 +- ...ough-local-gateway-triggered.latest.golden | 102 +- ...ilover-through-local-gateway.latest.golden | 102 +- ...ugh-remote-gateway-triggered.latest.golden | 102 +- ...lover-through-remote-gateway.latest.golden | 102 +- ...connect-proxy-with-tcp-chain.latest.golden | 136 - ...h-tls-incoming-cipher-suites.latest.golden | 136 - ...ith-tls-incoming-max-version.latest.golden | 136 - ...ith-tls-incoming-min-version.latest.golden | 136 - ...h-tls-outgoing-cipher-suites.latest.golden | 88 +- ...ith-tls-outgoing-max-version.latest.golden | 76 +- ...ls-outgoing-min-version-auto.latest.golden | 68 +- ...ith-tls-outgoing-min-version.latest.golden | 76 +- ...h-tproxy-and-permissive-mtls.latest.golden | 143 - ...t-tproxy-and-permissive-mtls.latest.golden | 136 - ...-limits-max-connections-only.latest.golden | 84 +- .../custom-limits-set-to-zero.latest.golden | 84 +- .../clusters/custom-limits.latest.golden | 84 +- .../clusters/custom-local-app.latest.golden | 66 +- ...stom-max-inbound-connections.latest.golden | 80 +- ...thcheck-zero-consecutive_5xx.latest.golden | 142 - .../custom-passive-healthcheck.latest.golden | 74 +- ...ustom-public-listener-http-2.latest.golden | 136 - 
...public-listener-http-missing.latest.golden | 136 - .../custom-public-listener-http.latest.golden | 136 - .../custom-public-listener.latest.golden | 136 - .../clusters/custom-timeouts.latest.golden | 68 +- .../custom-trace-listener.latest.golden | 136 - ...ustom-upstream-default-chain.latest.golden | 54 +- ...eam-ignored-with-disco-chain.latest.golden | 204 - ...upstream-with-prepared-query.latest.golden | 142 - .../clusters/custom-upstream.latest.golden | 54 +- .../testdata/clusters/defaults.latest.golden | 68 +- ...am-service-with-unix-sockets.latest.golden | 68 +- .../clusters/expose-checks-grpc.latest.golden | 65 - ...ecks-http-with-bind-override.latest.golden | 57 - .../clusters/expose-checks-http.latest.golden | 57 - .../clusters/expose-checks.latest.golden | 57 - ...paths-grpc-new-cluster-http1.latest.golden | 21 +- ...expose-paths-local-app-paths.latest.golden | 10 +- ...pose-paths-new-cluster-http2.latest.golden | 18 +- .../grpc-public-listener.latest.golden | 145 - .../http-listener-with-timeouts.latest.golden | 136 - ...http-public-listener-no-xfcc.latest.golden | 136 - .../http-public-listener.latest.golden | 136 - .../clusters/http-upstream.latest.golden | 136 - .../http2-public-listener.latest.golden | 145 - .../ingress-config-entry-nil.latest.golden | 5 - .../ingress-defaults-no-chain.latest.golden | 5 - ...ess-gateway-nil-config-entry.latest.golden | 4 +- .../ingress-gateway-no-services.latest.golden | 4 +- ...h-tls-outgoing-cipher-suites.latest.golden | 44 +- ...ith-tls-outgoing-max-version.latest.golden | 38 +- ...ith-tls-outgoing-min-version.latest.golden | 38 +- .../clusters/ingress-gateway.latest.golden | 34 +- ...gress-grpc-multiple-services.latest.golden | 125 - ...gress-http-multiple-services.latest.golden | 211 - .../ingress-lb-in-resolver.latest.golden | 68 +- ...-listeners-duplicate-service.latest.golden | 64 +- ...itter-with-resolver-redirect.latest.golden | 64 +- ...and-failover-to-cluster-peer.latest.golden | 66 +- ...ress-with-chain-and-failover.latest.golden | 66 +- ...hain-and-router-header-manip.latest.golden | 1537 --- ...ngress-with-chain-and-router.latest.golden | 1537 --- ...ress-with-chain-and-splitter.latest.golden | 211 - ...ress-with-chain-external-sni.latest.golden | 34 +- .../clusters/ingress-with-chain.latest.golden | 34 +- ...efaults-passive-health-check.latest.golden | 47 +- ...ults-service-max-connections.latest.golden | 40 +- .../ingress-with-grpc-router.latest.golden | 109 - ...ith-grpc-single-tls-listener.latest.golden | 125 - ...d-grpc-multiple-tls-listener.latest.golden | 125 - ...th-http2-single-tls-listener.latest.golden | 125 - ...efaults-passive-health-check.latest.golden | 48 +- ...ults-service-max-connections.latest.golden | 40 +- ...ervice-passive-health-check.latest.golden} | 56 +- ...h-sds-listener+service-level.latest.golden | 109 - ...h-sds-listener-gw-level-http.latest.golden | 58 - ...-listener-gw-level-mixed-tls.latest.golden | 109 - ...s-with-sds-listener-gw-level.latest.golden | 58 - ...-sds-listener-level-wildcard.latest.golden | 109 - ...ress-with-sds-listener-level.latest.golden | 109 - ...-sds-listener-listener-level.latest.golden | 58 - ...ess-with-sds-service-level-2.latest.golden | 109 - ...s-service-level-mixed-no-tls.latest.golden | 109 - ...-sds-service-level-mixed-tls.latest.golden | 109 - ...gress-with-sds-service-level.latest.golden | 109 - ...with-service-max-connections.latest.golden | 40 +- ...service-passive-health-check.latest.golden | 47 +- ...ess-with-single-tls-listener.latest.golden | 109 - 
...ough-local-gateway-triggered.latest.golden | 96 +- ...ilover-through-local-gateway.latest.golden | 96 +- ...ugh-remote-gateway-triggered.latest.golden | 96 +- ...lover-through-remote-gateway.latest.golden | 96 +- ...ough-local-gateway-triggered.latest.golden | 66 +- ...ilover-through-local-gateway.latest.golden | 66 +- ...ugh-remote-gateway-triggered.latest.golden | 66 +- ...lover-through-remote-gateway.latest.golden | 66 +- ...h-tls-listener-cipher-suites.latest.golden | 58 - ...ith-tls-listener-max-version.latest.golden | 58 - ...ith-tls-listener-min-version.latest.golden | 58 - .../ingress-with-tls-listener.latest.golden | 58 - ...n-listeners-gateway-defaults.latest.golden | 211 - ...ixed-cipher-suites-listeners.latest.golden | 109 - ...ess-with-tls-mixed-listeners.latest.golden | 109 - ...-mixed-max-version-listeners.latest.golden | 160 - ...-mixed-min-version-listeners.latest.golden | 160 - ...-balance-inbound-connections.latest.golden | 136 - ...tbound-connections-bind-port.latest.golden | 136 - .../listener-bind-address-port.latest.golden | 136 - .../listener-bind-address.latest.golden | 136 - .../clusters/listener-bind-port.latest.golden | 136 - ...ener-max-inbound-connections.latest.golden | 143 - .../listener-unix-domain-socket.latest.golden | 136 - ...ateway-with-peered-upstreams.latest.golden | 72 +- ...esh-gateway-custom-addresses.latest.golden | 106 - ...teway-default-service-subset.latest.golden | 158 - ...mesh-gateway-hash-lb-ignored.latest.golden | 98 +- ...teway-ignore-extra-resolvers.latest.golden | 98 +- ...rmation-in-federation-states.latest.golden | 106 - .../mesh-gateway-no-services.latest.golden | 4 +- ...gateway-non-hash-lb-injected.latest.golden | 104 +- ...rmation-in-federation-states.latest.golden | 106 - ...ateway-peering-control-plane.latest.golden | 18 +- ...mesh-gateway-service-subsets.latest.golden | 98 +- ...esh-gateway-service-subsets2.latest.golden | 158 - ...esh-gateway-service-timeouts.latest.golden | 98 +- ...esh-gateway-tagged-addresses.latest.golden | 106 - .../mesh-gateway-tcp-keepalives.latest.golden | 82 +- ...ing-federation-control-plane.latest.golden | 205 - ...eway-using-federation-states.latest.golden | 70 +- ...ed-services-http-with-router.latest.golden | 145 +- ...xported-peered-services-http.latest.golden | 118 +- ...ith-exported-peered-services.latest.golden | 124 +- ...ith-imported-peered-services.latest.golden | 30 +- ...through-mesh-gateway-enabled.latest.golden | 30 +- .../clusters/mesh-gateway.latest.golden | 70 +- ...itter-with-resolver-redirect.latest.golden | 98 +- .../telemetry-collector.latest.golden | 112 +- ...-custom-and-tagged-addresses.latest.golden | 148 - ...ateway-custom-trace-listener.latest.golden | 148 - ...teway-default-service-subset.latest.golden | 202 - ...way-hostname-service-subsets.latest.golden | 96 +- ...teway-http2-upstream-subsets.latest.golden | 106 +- ...ating-gateway-http2-upstream.latest.golden | 38 +- ...teway-ignore-extra-resolvers.latest.golden | 106 +- ...y-lb-config-no-hash-policies.latest.golden | 217 - ...erminating-gateway-lb-config.latest.golden | 118 +- ...minating-gateway-no-api-cert.latest.golden | 148 - ...minating-gateway-no-services.latest.golden | 4 +- ...ting-gateway-service-subsets.latest.golden | 118 +- .../terminating-gateway-sni.latest.golden | 92 +- ...ating-gateway-tcp-keepalives.latest.golden | 78 +- ...teway-with-peer-trust-bundle.latest.golden | 148 - ...h-tls-incoming-cipher-suites.latest.golden | 148 - ...ith-tls-incoming-max-version.latest.golden | 148 - 
...ith-tls-incoming-min-version.latest.golden | 148 - .../terminating-gateway.latest.golden | 66 +- ...xy-catalog-destinations-only.latest.golden | 128 +- ...arent-proxy-destination-http.latest.golden | 180 +- ...ransparent-proxy-destination.latest.golden | 180 +- ...roxy-dial-instances-directly.latest.golden | 174 +- ...nsparent-proxy-http-upstream.latest.golden | 245 - ...ng-gateway-destinations-only.latest.golden | 92 +- ...nt-proxy-terminating-gateway.latest.golden | 238 - ...-proxy-with-peered-upstreams.latest.golden | 94 +- ...h-resolver-redirect-upstream.latest.golden | 194 - .../clusters/transparent-proxy.latest.golden | 134 +- ...meout-ms-ingress-with-router.latest.golden | 130 +- ...fetch-timeout-ms-mgw-peering.latest.golden | 12 +- ...xds-fetch-timeout-ms-sidecar.latest.golden | 138 +- ...meout-ms-tproxy-http-peering.latest.golden | 170 - ...imeout-ms-tproxy-passthrough.latest.golden | 28 +- .../access-logs-defaults.latest.golden | 75 - .../access-logs-json-file.latest.golden | 75 - ...t-stderr-disablelistenerlogs.latest.golden | 75 - ...ttp-listener-with-http-route.latest.golden | 5 - .../api-gateway-http-listener.latest.golden | 5 - ...api-gateway-nil-config-entry.latest.golden | 5 - ...ener-with-tcp-and-http-route.latest.golden | 5 - ...-tcp-listener-with-tcp-route.latest.golden | 5 - .../api-gateway-tcp-listener.latest.golden | 5 - ...oute-and-inline-certificate.latest.golden} | 28 +- ...-route-timeoutfilter-one-set.latest.golden | 41 - .../api-gateway-with-http-route.latest.golden | 41 - ...eway-with-multiple-hostnames.latest.golden | 5 - ...multiple-inline-certificates.latest.golden | 5 - ...route-and-inline-certificate.latest.golden | 4 +- .../endpoints/api-gateway.latest.golden | 5 - ...nect-proxy-exported-to-peers.latest.golden | 4 +- ...connect-proxy-lb-in-resolver.latest.golden | 75 - ...nnect-proxy-resolver-with-lb.latest.golden | 75 - ...t-proxy-route-to-lb-resolver.latest.golden | 75 - ...ct-proxy-splitter-overweight.latest.golden | 41 - ...nect-proxy-upstream-defaults.latest.golden | 75 - ...and-failover-to-cluster-peer.latest.golden | 102 +- ...roxy-with-chain-and-failover.latest.golden | 116 +- ...oxy-with-chain-and-overrides.latest.golden | 4 +- ...and-redirect-to-cluster-peer.latest.golden | 66 +- ...-proxy-with-chain-and-router.latest.golden | 75 - ...roxy-with-chain-and-splitter.latest.golden | 75 - ...roxy-with-chain-external-sni.latest.golden | 4 +- ...nnect-proxy-with-chain-http2.latest.golden | 75 - .../connect-proxy-with-chain.latest.golden | 4 +- ...ult-chain-and-custom-cluster.latest.golden | 4 +- ...onnect-proxy-with-grpc-chain.latest.golden | 75 - ...nnect-proxy-with-grpc-router.latest.golden | 75 - ...onnect-proxy-with-http-chain.latest.golden | 75 - ...nnect-proxy-with-http2-chain.latest.golden | 75 - ...-jwt-config-entry-with-local.latest.golden | 75 - ...onfig-entry-with-remote-jwks.latest.golden | 75 - ...d-upstreams-escape-overrides.latest.golden | 29 - ...-with-peered-upstreams-http2.latest.golden | 29 - ...-proxy-with-peered-upstreams.latest.golden | 4 +- ...ough-local-gateway-triggered.latest.golden | 152 +- ...ilover-through-local-gateway.latest.golden | 116 +- ...ugh-remote-gateway-triggered.latest.golden | 152 +- ...lover-through-remote-gateway.latest.golden | 116 +- ...ough-local-gateway-triggered.latest.golden | 116 +- ...ilover-through-local-gateway.latest.golden | 116 +- ...ugh-remote-gateway-triggered.latest.golden | 116 +- ...lover-through-remote-gateway.latest.golden | 116 +- ...connect-proxy-with-tcp-chain.latest.golden | 75 - 
...h-tls-incoming-cipher-suites.latest.golden | 75 - ...ith-tls-incoming-max-version.latest.golden | 75 - ...ith-tls-incoming-min-version.latest.golden | 75 - ...h-tls-outgoing-cipher-suites.latest.golden | 75 - ...ith-tls-outgoing-max-version.latest.golden | 75 - ...ls-outgoing-min-version-auto.latest.golden | 75 - ...ith-tls-outgoing-min-version.latest.golden | 75 - ...h-tproxy-and-permissive-mtls.latest.golden | 75 - ...t-tproxy-and-permissive-mtls.latest.golden | 75 - ...-limits-max-connections-only.latest.golden | 75 - .../custom-limits-set-to-zero.latest.golden | 75 - .../endpoints/custom-limits.latest.golden | 75 - .../endpoints/custom-local-app.latest.golden | 75 - ...stom-max-inbound-connections.latest.golden | 75 - ...thcheck-zero-consecutive_5xx.latest.golden | 75 - .../custom-passive-healthcheck.latest.golden | 75 - ...ustom-public-listener-http-2.latest.golden | 75 - ...public-listener-http-missing.latest.golden | 75 - .../custom-public-listener-http.latest.golden | 75 - .../custom-public-listener.latest.golden | 75 - .../endpoints/custom-timeouts.latest.golden | 75 - .../custom-trace-listener.latest.golden | 75 - ...ustom-upstream-default-chain.latest.golden | 75 - ...eam-ignored-with-disco-chain.latest.golden | 109 - ...upstream-with-prepared-query.latest.golden | 41 - .../endpoints/custom-upstream.latest.golden | 75 - .../testdata/endpoints/defaults.latest.golden | 4 +- ...am-service-with-unix-sockets.latest.golden | 75 - .../expose-checks-grpc.latest.golden | 5 - ...ecks-http-with-bind-override.latest.golden | 5 - .../expose-checks-http.latest.golden | 5 - .../endpoints/expose-checks.latest.golden | 5 - ...paths-grpc-new-cluster-http1.latest.golden | 5 - ...expose-paths-local-app-paths.latest.golden | 5 - ...pose-paths-new-cluster-http2.latest.golden | 5 - .../grpc-public-listener.latest.golden | 75 - .../http-listener-with-timeouts.latest.golden | 75 - ...http-public-listener-no-xfcc.latest.golden | 75 - .../http-public-listener.latest.golden | 75 - .../endpoints/http-upstream.latest.golden | 75 - .../http2-public-listener.latest.golden | 75 - .../ingress-config-entry-nil.latest.golden | 5 - .../ingress-defaults-no-chain.latest.golden | 5 - .../ingress-gateway-bind-addrs.latest.golden | 41 - ...ess-gateway-nil-config-entry.latest.golden | 4 +- .../ingress-gateway-no-services.latest.golden | 4 +- ...h-tls-outgoing-cipher-suites.latest.golden | 41 - ...ith-tls-outgoing-max-version.latest.golden | 41 - ...ith-tls-outgoing-min-version.latest.golden | 41 - .../endpoints/ingress-gateway.latest.golden | 4 +- ...gress-grpc-multiple-services.latest.golden | 75 - ...gress-http-multiple-services.latest.golden | 143 - .../ingress-lb-in-resolver.latest.golden | 41 - ...-listeners-duplicate-service.latest.golden | 4 +- ...itter-with-resolver-redirect.latest.golden | 4 +- ...and-failover-to-cluster-peer.latest.golden | 66 +- ...ress-with-chain-and-failover.latest.golden | 80 +- ...hain-and-router-header-manip.latest.golden | 41 - ...ngress-with-chain-and-router.latest.golden | 41 - ...ress-with-chain-and-splitter.latest.golden | 41 - ...ress-with-chain-external-sni.latest.golden | 4 +- .../ingress-with-chain.latest.golden | 4 +- ...efaults-passive-health-check.latest.golden | 41 - ...ults-service-max-connections.latest.golden | 41 - .../ingress-with-grpc-router.latest.golden | 41 - ...ith-grpc-single-tls-listener.latest.golden | 75 - ...d-grpc-multiple-tls-listener.latest.golden | 75 - ...th-http2-single-tls-listener.latest.golden | 75 - ...efaults-passive-health-check.latest.golden | 41 - 
...ults-service-max-connections.latest.golden | 41 - ...h-sds-listener+service-level.latest.golden | 75 - ...h-sds-listener-gw-level-http.latest.golden | 41 - ...-listener-gw-level-mixed-tls.latest.golden | 75 - ...s-with-sds-listener-gw-level.latest.golden | 41 - ...-sds-listener-level-wildcard.latest.golden | 75 - ...ress-with-sds-listener-level.latest.golden | 75 - ...-sds-listener-listener-level.latest.golden | 41 - ...ess-with-sds-service-level-2.latest.golden | 75 - ...s-service-level-mixed-no-tls.latest.golden | 75 - ...-sds-service-level-mixed-tls.latest.golden | 75 - ...gress-with-sds-service-level.latest.golden | 75 - ...with-service-max-connections.latest.golden | 41 - ...service-passive-health-check.latest.golden | 41 - ...ess-with-single-tls-listener.latest.golden | 75 - ...ough-local-gateway-triggered.latest.golden | 116 +- ...ilover-through-local-gateway.latest.golden | 80 +- ...ugh-remote-gateway-triggered.latest.golden | 116 +- ...lover-through-remote-gateway.latest.golden | 80 +- ...ough-local-gateway-triggered.latest.golden | 80 +- ...ilover-through-local-gateway.latest.golden | 80 +- ...ugh-remote-gateway-triggered.latest.golden | 80 +- ...lover-through-remote-gateway.latest.golden | 80 +- ...h-tls-listener-cipher-suites.latest.golden | 41 - ...ith-tls-listener-max-version.latest.golden | 41 - ...ith-tls-listener-min-version.latest.golden | 41 - .../ingress-with-tls-listener.latest.golden | 41 - ...n-listeners-gateway-defaults.latest.golden | 143 - ...ixed-cipher-suites-listeners.latest.golden | 75 - ...ess-with-tls-mixed-listeners.latest.golden | 75 - ...-mixed-max-version-listeners.latest.golden | 109 - ...-mixed-min-version-listeners.latest.golden | 109 - ...-balance-inbound-connections.latest.golden | 75 - ...tbound-connections-bind-port.latest.golden | 75 - .../listener-bind-address-port.latest.golden | 75 - .../listener-bind-address.latest.golden | 75 - .../listener-bind-port.latest.golden | 75 - ...ener-max-inbound-connections.latest.golden | 75 - .../listener-unix-domain-socket.latest.golden | 75 - ...ateway-with-peered-upstreams.latest.golden | 4 +- ...esh-gateway-custom-addresses.latest.golden | 145 - ...teway-default-service-subset.latest.golden | 4 +- ...mesh-gateway-hash-lb-ignored.latest.golden | 159 - ...teway-ignore-extra-resolvers.latest.golden | 120 - ...rmation-in-federation-states.latest.golden | 4 +- .../mesh-gateway-no-services.latest.golden | 4 +- ...gateway-non-hash-lb-injected.latest.golden | 159 - ...rmation-in-federation-states.latest.golden | 4 +- ...ateway-peering-control-plane.latest.golden | 4 +- ...mesh-gateway-service-subsets.latest.golden | 118 +- ...esh-gateway-service-subsets2.latest.golden | 269 - ...esh-gateway-service-timeouts.latest.golden | 159 - ...esh-gateway-tagged-addresses.latest.golden | 145 - .../mesh-gateway-tcp-keepalives.latest.golden | 145 - ...ing-federation-control-plane.latest.golden | 249 - ...eway-using-federation-states.latest.golden | 4 +- ...ed-services-http-with-router.latest.golden | 104 +- ...xported-peered-services-http.latest.golden | 4 +- ...ith-exported-peered-services.latest.golden | 4 +- ...ith-imported-peered-services.latest.golden | 4 +- ...through-mesh-gateway-enabled.latest.golden | 4 +- .../endpoints/mesh-gateway.latest.golden | 4 +- ...itter-with-resolver-redirect.latest.golden | 4 +- .../telemetry-collector.latest.golden | 102 +- ...-custom-and-tagged-addresses.latest.golden | 41 - ...ateway-custom-trace-listener.latest.golden | 41 - ...teway-default-service-subset.latest.golden | 4 +- 
...way-hostname-service-subsets.latest.golden | 41 - ...teway-http2-upstream-subsets.latest.golden | 5 - ...ating-gateway-http2-upstream.latest.golden | 5 - ...teway-ignore-extra-resolvers.latest.golden | 28 - ...y-lb-config-no-hash-policies.latest.golden | 28 - ...erminating-gateway-lb-config.latest.golden | 28 - ...minating-gateway-no-api-cert.latest.golden | 41 - ...minating-gateway-no-services.latest.golden | 4 +- ...ting-gateway-service-subsets.latest.golden | 4 +- .../terminating-gateway-sni.latest.golden | 41 - ...ating-gateway-tcp-keepalives.latest.golden | 41 - ...teway-with-peer-trust-bundle.latest.golden | 41 - ...h-tls-incoming-cipher-suites.latest.golden | 41 - ...ith-tls-incoming-max-version.latest.golden | 41 - ...ith-tls-incoming-min-version.latest.golden | 41 - .../terminating-gateway.latest.golden | 4 +- ...xy-catalog-destinations-only.latest.golden | 104 - ...arent-proxy-destination-http.latest.golden | 4 +- ...ransparent-proxy-destination.latest.golden | 4 +- ...roxy-dial-instances-directly.latest.golden | 131 - ...nsparent-proxy-http-upstream.latest.golden | 104 - ...ng-gateway-destinations-only.latest.golden | 4 +- ...nt-proxy-terminating-gateway.latest.golden | 119 - ...-proxy-with-peered-upstreams.latest.golden | 8 +- ...h-resolver-redirect-upstream.latest.golden | 63 - .../endpoints/transparent-proxy.latest.golden | 8 +- ...meout-ms-tproxy-http-peering.latest.golden | 29 - .../jwt_authn/intention-with-path.golden | 18 +- .../testdata/jwt_authn/local-provider.golden | 12 +- ...ltiple-providers-and-one-permission.golden | 49 +- .../testdata/jwt_authn/remote-provider.golden | 18 +- .../top-level-provider-with-permission.golden | 19 +- ...ttp-provider-with-hostname-and-port.golden | 2 +- ...http-provider-with-hostname-no-port.golden | 2 +- .../http-provider-with-ip-and-port.golden | 2 +- .../http-provider-with-ip-no-port.golden | 2 +- ...tps-provider-with-hostname-and-port.golden | 4 +- ...ttps-provider-with-hostname-no-port.golden | 4 +- .../https-provider-with-ip-and-port.golden | 6 +- .../https-provider-with-ip-no-port.golden | 4 +- .../access-logs-defaults.latest.golden | 398 +- .../access-logs-json-file.latest.golden | 144 +- ...t-stderr-disablelistenerlogs.latest.golden | 36 +- ...ttp-listener-with-http-route.latest.golden | 22 +- .../api-gateway-http-listener.latest.golden | 6 +- ...api-gateway-nil-config-entry.latest.golden | 6 +- ...ener-with-tcp-and-http-route.latest.golden | 28 +- ...-tcp-listener-with-tcp-route.latest.golden | 36 +- .../api-gateway-tcp-listener.latest.golden | 6 +- .../api-gateway-tcp-listeners.latest.golden | 5 + ...oute-and-inline-certificate.latest.golden} | 22 +- ...-route-timeoutfilter-one-set.latest.golden | 85 - .../api-gateway-with-http-route.latest.golden | 85 - ...multiple-inline-certificates.latest.golden | 102 - ...route-and-inline-certificate.latest.golden | 14 +- .../listeners/api-gateway.latest.golden | 6 +- ...nect-proxy-exported-to-peers.latest.golden | 18 +- ...connect-proxy-lb-in-resolver.latest.golden | 137 - ...nnect-proxy-resolver-with-lb.latest.golden | 137 - ...t-proxy-route-to-lb-resolver.latest.golden | 137 - ...ct-proxy-splitter-overweight.latest.golden | 137 - ...nect-proxy-upstream-defaults.latest.golden | 30 +- ...and-failover-to-cluster-peer.latest.golden | 132 +- ...roxy-with-chain-and-failover.latest.golden | 115 - ...oxy-with-chain-and-overrides.latest.golden | 38 +- ...and-redirect-to-cluster-peer.latest.golden | 132 +- ...-proxy-with-chain-and-router.latest.golden | 137 - 
...roxy-with-chain-and-splitter.latest.golden | 137 - ...roxy-with-chain-external-sni.latest.golden | 30 +- ...nnect-proxy-with-chain-http2.latest.golden | 152 - .../connect-proxy-with-chain.latest.golden | 115 - ...ult-chain-and-custom-cluster.latest.golden | 115 - ...onnect-proxy-with-grpc-chain.latest.golden | 38 +- ...nnect-proxy-with-grpc-router.latest.golden | 151 - ...onnect-proxy-with-http-chain.latest.golden | 36 +- ...nnect-proxy-with-http2-chain.latest.golden | 38 +- ...-jwt-config-entry-with-local.latest.golden | 209 +- ...onfig-entry-with-remote-jwks.latest.golden | 115 - ...d-upstreams-escape-overrides.latest.golden | 114 - ...-with-peered-upstreams-http2.latest.golden | 189 - ...-proxy-with-peered-upstreams.latest.golden | 30 +- ...ough-local-gateway-triggered.latest.golden | 115 - ...ilover-through-local-gateway.latest.golden | 115 - ...ugh-remote-gateway-triggered.latest.golden | 115 - ...lover-through-remote-gateway.latest.golden | 115 - ...ough-local-gateway-triggered.latest.golden | 115 - ...ilover-through-local-gateway.latest.golden | 30 +- ...ugh-remote-gateway-triggered.latest.golden | 115 - ...lover-through-remote-gateway.latest.golden | 30 +- ...connect-proxy-with-tcp-chain.latest.golden | 30 +- ...h-tls-incoming-cipher-suites.latest.golden | 38 +- ...ith-tls-incoming-max-version.latest.golden | 32 +- ...ith-tls-incoming-min-version.latest.golden | 32 +- ...h-tls-outgoing-cipher-suites.latest.golden | 115 - ...ith-tls-outgoing-max-version.latest.golden | 115 - ...ls-outgoing-min-version-auto.latest.golden | 30 +- ...ith-tls-outgoing-min-version.latest.golden | 115 - ...h-tproxy-and-permissive-mtls.latest.golden | 38 +- ...t-tproxy-and-permissive-mtls.latest.golden | 26 +- ...-limits-max-connections-only.latest.golden | 115 - .../custom-limits-set-to-zero.latest.golden | 115 - .../listeners/custom-limits.latest.golden | 115 - .../listeners/custom-local-app.latest.golden | 115 - ...stom-max-inbound-connections.latest.golden | 123 - ...thcheck-zero-consecutive_5xx.latest.golden | 115 - .../custom-passive-healthcheck.latest.golden | 115 - ...ustom-public-listener-http-2.latest.golden | 62 +- ...public-listener-http-missing.latest.golden | 40 +- .../custom-public-listener-http.latest.golden | 62 +- .../custom-public-listener.latest.golden | 32 +- .../listeners/custom-timeouts.latest.golden | 115 - .../custom-trace-listener.latest.golden | 115 +- ...ustom-upstream-default-chain.latest.golden | 115 - ...eam-ignored-with-disco-chain.latest.golden | 30 +- ...upstream-with-prepared-query.latest.golden | 152 - .../listeners/custom-upstream.latest.golden | 37 +- .../testdata/listeners/defaults.latest.golden | 30 +- ...am-service-with-unix-sockets.latest.golden | 115 - .../expose-checks-grpc.latest.golden | 143 - ...ecks-http-with-bind-override.latest.golden | 142 - .../expose-checks-http.latest.golden | 142 - .../listeners/expose-checks.latest.golden | 34 +- ...paths-grpc-new-cluster-http1.latest.golden | 179 - ...expose-paths-local-app-paths.latest.golden | 56 +- ...pose-paths-new-cluster-http2.latest.golden | 58 +- .../grpc-public-listener.latest.golden | 76 +- .../http-listener-with-timeouts.latest.golden | 99 +- ...http-public-listener-no-xfcc.latest.golden | 62 +- .../http-public-listener.latest.golden | 95 +- .../listeners/http-upstream.latest.golden | 40 +- .../http2-public-listener.latest.golden | 99 +- .../ingress-config-entry-nil.latest.golden | 5 - .../ingress-defaults-no-chain.latest.golden | 5 - .../ingress-gateway-bind-addrs.latest.golden | 22 +- 
...ess-gateway-nil-config-entry.latest.golden | 4 +- .../ingress-gateway-no-services.latest.golden | 4 +- ...h-tls-outgoing-cipher-suites.latest.golden | 32 - ...ith-tls-outgoing-max-version.latest.golden | 32 - ...ith-tls-outgoing-min-version.latest.golden | 32 - .../listeners/ingress-gateway.latest.golden | 10 +- ...gress-grpc-multiple-services.latest.golden | 24 +- ...gress-http-multiple-services.latest.golden | 40 +- .../ingress-lb-in-resolver.latest.golden | 54 - ...-listeners-duplicate-service.latest.golden | 101 - ...itter-with-resolver-redirect.latest.golden | 22 +- ...and-failover-to-cluster-peer.latest.golden | 32 - ...ress-with-chain-and-failover.latest.golden | 32 - ...hain-and-router-header-manip.latest.golden | 54 - ...ngress-with-chain-and-router.latest.golden | 54 - ...ress-with-chain-and-splitter.latest.golden | 54 - ...ress-with-chain-external-sni.latest.golden | 10 +- .../ingress-with-chain.latest.golden | 32 - ...efaults-passive-health-check.latest.golden | 32 - ...ults-service-max-connections.latest.golden | 32 - .../ingress-with-grpc-router.latest.golden | 54 - ...ith-grpc-single-tls-listener.latest.golden | 60 +- ...d-grpc-multiple-tls-listener.latest.golden | 76 +- ...th-http2-single-tls-listener.latest.golden | 60 +- ...efaults-passive-health-check.latest.golden | 32 - ...ults-service-max-connections.latest.golden | 32 - ...h-sds-listener+service-level.latest.golden | 62 +- ...h-sds-listener-gw-level-http.latest.golden | 34 +- ...-listener-gw-level-mixed-tls.latest.golden | 26 +- ...s-with-sds-listener-gw-level.latest.golden | 20 +- ...-sds-listener-level-wildcard.latest.golden | 87 - ...ress-with-sds-listener-level.latest.golden | 87 - ...-sds-listener-listener-level.latest.golden | 20 +- ...ess-with-sds-service-level-2.latest.golden | 172 - ...s-service-level-mixed-no-tls.latest.golden | 50 +- ...-sds-service-level-mixed-tls.latest.golden | 134 - ...gress-with-sds-service-level.latest.golden | 62 +- ...with-service-max-connections.latest.golden | 32 - ...service-passive-health-check.latest.golden | 32 - ...ess-with-single-tls-listener.latest.golden | 54 +- ...ough-local-gateway-triggered.latest.golden | 32 - ...ilover-through-local-gateway.latest.golden | 32 - ...ugh-remote-gateway-triggered.latest.golden | 32 - ...lover-through-remote-gateway.latest.golden | 32 - ...ough-local-gateway-triggered.latest.golden | 32 - ...ilover-through-local-gateway.latest.golden | 10 +- ...ugh-remote-gateway-triggered.latest.golden | 32 - ...lover-through-remote-gateway.latest.golden | 10 +- ...h-tls-listener-cipher-suites.latest.golden | 22 +- ...ith-tls-listener-max-version.latest.golden | 16 +- ...ith-tls-listener-min-version.latest.golden | 16 +- .../ingress-with-tls-listener.latest.golden | 14 +- ...n-listeners-gateway-defaults.latest.golden | 160 +- ...ixed-cipher-suites-listeners.latest.golden | 166 - ...ess-with-tls-mixed-listeners.latest.golden | 50 +- ...-mixed-max-version-listeners.latest.golden | 238 - ...-mixed-min-version-listeners.latest.golden | 100 +- ...-balance-inbound-connections.latest.golden | 40 +- ...tbound-connections-bind-port.latest.golden | 40 +- .../listener-bind-address-port.latest.golden | 30 +- .../listener-bind-address.latest.golden | 30 +- .../listener-bind-port.latest.golden | 30 +- ...ener-max-inbound-connections.latest.golden | 34 +- .../listener-unix-domain-socket.latest.golden | 34 +- ...ateway-with-peered-upstreams.latest.golden | 30 +- ...esh-gateway-custom-addresses.latest.golden | 84 +- ...teway-default-service-subset.latest.golden | 96 - 
...mesh-gateway-hash-lb-ignored.latest.golden | 96 - ...teway-ignore-extra-resolvers.latest.golden | 96 - ...rmation-in-federation-states.latest.golden | 96 - .../mesh-gateway-no-services.latest.golden | 12 +- ...gateway-non-hash-lb-injected.latest.golden | 96 - ...rmation-in-federation-states.latest.golden | 96 - ...ateway-peering-control-plane.latest.golden | 16 +- ...mesh-gateway-service-subsets.latest.golden | 96 - ...esh-gateway-service-subsets2.latest.golden | 96 - ...esh-gateway-service-timeouts.latest.golden | 96 - ...esh-gateway-tagged-addresses.latest.golden | 44 +- .../mesh-gateway-tcp-keepalives.latest.golden | 96 - ...ing-federation-control-plane.latest.golden | 181 - ...eway-using-federation-states.latest.golden | 24 +- ...ed-services-http-with-router.latest.golden | 38 +- ...xported-peered-services-http.latest.golden | 90 +- ...ith-exported-peered-services.latest.golden | 24 +- ...ith-imported-peered-services.latest.golden | 12 +- ...through-mesh-gateway-enabled.latest.golden | 24 +- .../listeners/mesh-gateway.latest.golden | 24 +- ...itter-with-resolver-redirect.latest.golden | 36 +- .../telemetry-collector.latest.golden | 72 +- ...-custom-and-tagged-addresses.latest.golden | 112 +- ...ateway-custom-trace-listener.latest.golden | 246 - ...teway-default-service-subset.latest.golden | 346 - ...way-hostname-service-subsets.latest.golden | 462 - ...teway-http2-upstream-subsets.latest.golden | 46 - ...ating-gateway-http2-upstream.latest.golden | 46 - ...teway-ignore-extra-resolvers.latest.golden | 433 - ...y-lb-config-no-hash-policies.latest.golden | 433 - ...erminating-gateway-lb-config.latest.golden | 433 - ...minating-gateway-no-api-cert.latest.golden | 46 +- ...minating-gateway-no-services.latest.golden | 10 +- ...ting-gateway-service-subsets.latest.golden | 156 +- .../terminating-gateway-sni.latest.golden | 246 - ...ating-gateway-tcp-keepalives.latest.golden | 246 - ...teway-with-peer-trust-bundle.latest.golden | 1 + ...h-tls-incoming-cipher-suites.latest.golden | 90 +- ...ith-tls-incoming-max-version.latest.golden | 66 +- ...ith-tls-incoming-min-version.latest.golden | 66 +- .../terminating-gateway.latest.golden | 58 +- ...xy-catalog-destinations-only.latest.golden | 36 +- ...arent-proxy-destination-http.latest.golden | 58 +- ...ransparent-proxy-destination.latest.golden | 48 +- ...roxy-dial-instances-directly.latest.golden | 68 +- ...nsparent-proxy-http-upstream.latest.golden | 70 +- ...ng-gateway-destinations-only.latest.golden | 98 +- ...nt-proxy-terminating-gateway.latest.golden | 40 +- ...-proxy-with-peered-upstreams.latest.golden | 60 +- ...h-resolver-redirect-upstream.latest.golden | 54 +- .../listeners/transparent-proxy.latest.golden | 60 +- ...meout-ms-tproxy-http-peering.latest.golden | 189 - ...deny-all-and-path-allow--httpfilter.golden | 6 + ...fault-allow-deny-all-and-path-allow.golden | 9 + ...-deny-all-and-path-deny--httpfilter.golden | 9 + ...efault-allow-deny-all-and-path-deny.golden | 9 + ...ault-allow-kitchen-sink--httpfilter.golden | 18 + .../rbac/default-allow-kitchen-sink.golden | 18 + .../default-allow-one-deny--httpfilter.golden | 3 + .../rbac/default-allow-one-deny.golden | 3 + .../rbac/default-allow-path-allow.golden | 3 + ...default-allow-path-deny--httpfilter.golden | 3 + .../rbac/default-allow-path-deny.golden | 3 + ...w-service-wildcard-deny--httpfilter.golden | 3 + ...default-allow-service-wildcard-deny.golden | 3 + ...with-kitchen-sink-perms--httpfilter.golden | 83 +- ...e-intention-with-kitchen-sink-perms.golden | 3 + 
...ath-deny-and-path-allow--httpfilter.golden | 3 + ...-allow-two-path-deny-and-path-allow.golden | 3 + ...default-deny-allow-deny--httpfilter.golden | 6 + .../rbac/default-deny-allow-deny.golden | 6 + ...deny-all-and-path-allow--httpfilter.golden | 3 + ...efault-deny-deny-all-and-path-allow.golden | 4 +- ...-deny-all-and-path-deny--httpfilter.golden | 4 +- ...default-deny-deny-all-and-path-deny.golden | 4 +- ...fault-deny-kitchen-sink--httpfilter.golden | 18 + .../rbac/default-deny-kitchen-sink.golden | 18 + ...t-deny-mixed-precedence--httpfilter.golden | 3 + .../rbac/default-deny-mixed-precedence.golden | 3 + .../default-deny-one-allow--httpfilter.golden | 3 + .../rbac/default-deny-one-allow.golden | 3 + ...default-deny-path-allow--httpfilter.golden | 3 + .../rbac/default-deny-path-allow.golden | 4 +- .../default-deny-path-deny--httpfilter.golden | 4 +- .../rbac/default-deny-path-deny.golden | 4 +- ...eny-peered-kitchen-sink--httpfilter.golden | 12 + .../default-deny-peered-kitchen-sink.golden | 9 + ...-service-wildcard-allow--httpfilter.golden | 3 + ...default-deny-service-wildcard-allow.golden | 3 + ...with-kitchen-sink-perms--httpfilter.golden | 83 +- ...e-intention-with-kitchen-sink-perms.golden | 4 +- ...ath-deny-and-path-allow--httpfilter.golden | 3 + ...t-deny-two-path-deny-and-path-allow.golden | 4 +- ...jwt-with-one-permission--httpfilter.golden | 17 +- ...y-top-level-jwt-with-one-permission.golden | 4 +- ...evel-jwt-no-permissions--httpfilter.golden | 27 +- .../rbac/top-level-jwt-no-permissions.golden | 3 + ...th-multiple-permissions--httpfilter.golden | 113 +- ...level-jwt-with-multiple-permissions.golden | 4 +- ...jwt-with-one-permission--httpfilter.golden | 47 +- .../top-level-jwt-with-one-permission.golden | 4 +- .../v2-L4-deny-L7-allow--httpfilter.golden | 77 - .../testdata/rbac/v2-L4-deny-L7-allow.golden | 42 - .../rbac/v2-default-allow--httpfilter.golden | 1 - .../xds/testdata/rbac/v2-default-allow.golden | 1 - .../rbac/v2-default-deny--httpfilter.golden | 7 - .../xds/testdata/rbac/v2-default-deny.golden | 8 - ...gnore-empty-permissions--httpfilter.golden | 20 - .../rbac/v2-ignore-empty-permissions.golden | 22 - .../rbac/v2-kitchen-sink--httpfilter.golden | 114 - .../xds/testdata/rbac/v2-kitchen-sink.golden | 116 - .../rbac/v2-path-excludes--httpfilter.golden | 47 - .../xds/testdata/rbac/v2-path-excludes.golden | 8 - ...-method-header-excludes--httpfilter.golden | 69 - .../v2-path-method-header-excludes.golden | 8 - ...tiple-destination-rules--httpfilter.golden | 183 - ...rmission-multiple-destination-rules.golden | 8 - ...ermission-with-excludes--httpfilter.golden | 104 - .../v2-single-permission-with-excludes.golden | 8 - ...with-kitchen-sink-perms--httpfilter.golden | 226 - ...-permission-with-kitchen-sink-perms.golden | 8 - .../routes/access-logs-defaults.latest.golden | 5 - .../access-logs-json-file.latest.golden | 5 - ...t-stderr-disablelistenerlogs.latest.golden | 5 - ...ttp-listener-with-http-route.latest.golden | 31 - .../api-gateway-http-listener.latest.golden | 5 - ...api-gateway-nil-config-entry.latest.golden | 5 - ...ener-with-tcp-and-http-route.latest.golden | 31 - ...-tcp-listener-with-tcp-route.latest.golden | 5 - .../api-gateway-tcp-listener.latest.golden | 5 - ...oute-and-inline-certificate.latest.golden} | 26 +- .../api-gateway-with-http-route.latest.golden | 58 - ...eway-with-multiple-hostnames.latest.golden | 68 +- ...multiple-inline-certificates.latest.golden | 5 - ...route-and-inline-certificate.latest.golden | 4 +- 
.../testdata/routes/api-gateway.latest.golden | 5 - ...nect-proxy-exported-to-peers.latest.golden | 4 +- ...connect-proxy-lb-in-resolver.latest.golden | 43 +- ...nnect-proxy-resolver-with-lb.latest.golden | 30 - ...t-proxy-route-to-lb-resolver.latest.golden | 38 - ...ct-proxy-splitter-overweight.latest.golden | 99 - ...nect-proxy-upstream-defaults.latest.golden | 5 - ...and-failover-to-cluster-peer.latest.golden | 32 +- ...roxy-with-chain-and-failover.latest.golden | 5 - ...oxy-with-chain-and-overrides.latest.golden | 10 +- ...and-redirect-to-cluster-peer.latest.golden | 32 +- ...-proxy-with-chain-and-router.latest.golden | 133 +- ...roxy-with-chain-and-splitter.latest.golden | 86 +- ...roxy-with-chain-external-sni.latest.golden | 32 +- ...nnect-proxy-with-chain-http2.latest.golden | 5 - .../connect-proxy-with-chain.latest.golden | 32 +- ...ult-chain-and-custom-cluster.latest.golden | 5 - ...onnect-proxy-with-grpc-chain.latest.golden | 31 - ...nnect-proxy-with-grpc-router.latest.golden | 10 +- ...onnect-proxy-with-http-chain.latest.golden | 31 - ...nnect-proxy-with-http2-chain.latest.golden | 31 - ...-jwt-config-entry-with-local.latest.golden | 5 - ...onfig-entry-with-remote-jwks.latest.golden | 5 - ...d-upstreams-escape-overrides.latest.golden | 5 - ...-with-peered-upstreams-http2.latest.golden | 5 - ...-proxy-with-peered-upstreams.latest.golden | 4 +- ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...connect-proxy-with-tcp-chain.latest.golden | 5 - ...h-tls-incoming-cipher-suites.latest.golden | 5 - ...ith-tls-incoming-max-version.latest.golden | 5 - ...ith-tls-incoming-min-version.latest.golden | 5 - ...h-tls-outgoing-cipher-suites.latest.golden | 5 - ...ith-tls-outgoing-max-version.latest.golden | 5 - ...ls-outgoing-min-version-auto.latest.golden | 5 - ...ith-tls-outgoing-min-version.latest.golden | 5 - ...h-tproxy-and-permissive-mtls.latest.golden | 5 - ...t-tproxy-and-permissive-mtls.latest.golden | 5 - ...-limits-max-connections-only.latest.golden | 5 - .../custom-limits-set-to-zero.latest.golden | 5 - .../routes/custom-limits.latest.golden | 5 - .../routes/custom-local-app.latest.golden | 5 - ...stom-max-inbound-connections.latest.golden | 5 - ...thcheck-zero-consecutive_5xx.latest.golden | 5 - .../custom-passive-healthcheck.latest.golden | 5 - ...ustom-public-listener-http-2.latest.golden | 5 - ...public-listener-http-missing.latest.golden | 5 - .../custom-public-listener-http.latest.golden | 5 - .../custom-public-listener.latest.golden | 5 - .../routes/custom-timeouts.latest.golden | 5 - .../custom-trace-listener.latest.golden | 5 - ...ustom-upstream-default-chain.latest.golden | 5 - ...eam-ignored-with-disco-chain.latest.golden | 5 - ...upstream-with-prepared-query.latest.golden | 5 - .../routes/custom-upstream.latest.golden | 5 - .../testdata/routes/defaults.latest.golden | 4 +- ...am-service-with-unix-sockets.latest.golden | 5 - .../routes/expose-checks-grpc.latest.golden | 5 - ...ecks-http-with-bind-override.latest.golden | 5 - .../routes/expose-checks-http.latest.golden | 5 - .../routes/expose-checks.latest.golden | 5 - ...paths-grpc-new-cluster-http1.latest.golden | 5 - 
...expose-paths-local-app-paths.latest.golden | 5 - ...pose-paths-new-cluster-http2.latest.golden | 5 - .../routes/grpc-public-listener.latest.golden | 5 - .../http-listener-with-timeouts.latest.golden | 5 - ...http-public-listener-no-xfcc.latest.golden | 5 - .../routes/http-public-listener.latest.golden | 5 - .../routes/http-upstream.latest.golden | 5 - .../http2-public-listener.latest.golden | 5 - .../ingress-config-entry-nil.latest.golden | 4 +- .../ingress-defaults-no-chain.latest.golden | 4 +- .../ingress-gateway-bind-addrs.latest.golden | 5 - ...ess-gateway-nil-config-entry.latest.golden | 5 - .../ingress-gateway-no-services.latest.golden | 5 - ...h-tls-outgoing-cipher-suites.latest.golden | 5 - ...ith-tls-outgoing-max-version.latest.golden | 5 - ...ith-tls-outgoing-min-version.latest.golden | 5 - .../routes/ingress-gateway.latest.golden | 5 - ...gress-grpc-multiple-services.latest.golden | 50 +- ...gress-http-multiple-services.latest.golden | 20 +- .../ingress-lb-in-resolver.latest.golden | 43 +- ...-listeners-duplicate-service.latest.golden | 72 - ...itter-with-resolver-redirect.latest.golden | 10 +- ...and-failover-to-cluster-peer.latest.golden | 5 - ...ress-with-chain-and-failover.latest.golden | 5 - ...hain-and-router-header-manip.latest.golden | 207 +- ...ngress-with-chain-and-router.latest.golden | 133 +- ...ress-with-chain-and-splitter.latest.golden | 86 +- ...ress-with-chain-external-sni.latest.golden | 4 +- .../routes/ingress-with-chain.latest.golden | 4 +- ...efaults-passive-health-check.latest.golden | 5 - ...ults-service-max-connections.latest.golden | 5 - .../ingress-with-grpc-router.latest.golden | 10 +- ...ith-grpc-single-tls-listener.latest.golden | 55 - ...d-grpc-multiple-tls-listener.latest.golden | 55 - ...th-http2-single-tls-listener.latest.golden | 55 - ...efaults-passive-health-check.latest.golden | 5 - ...ults-service-max-connections.latest.golden | 5 - ...h-sds-listener+service-level.latest.golden | 55 - ...h-sds-listener-gw-level-http.latest.golden | 31 - ...-listener-gw-level-mixed-tls.latest.golden | 5 - ...s-with-sds-listener-gw-level.latest.golden | 5 - ...-sds-listener-level-wildcard.latest.golden | 50 +- ...ress-with-sds-listener-level.latest.golden | 50 +- ...-sds-listener-listener-level.latest.golden | 5 - ...ess-with-sds-service-level-2.latest.golden | 57 - ...s-service-level-mixed-no-tls.latest.golden | 55 - ...-sds-service-level-mixed-tls.latest.golden | 60 +- ...gress-with-sds-service-level.latest.golden | 66 +- ...with-service-max-connections.latest.golden | 5 - ...service-passive-health-check.latest.golden | 5 - ...ess-with-single-tls-listener.latest.golden | 55 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...h-tls-listener-cipher-suites.latest.golden | 5 - ...ith-tls-listener-max-version.latest.golden | 5 - ...ith-tls-listener-min-version.latest.golden | 5 - .../ingress-with-tls-listener.latest.golden | 5 - ...n-listeners-gateway-defaults.latest.golden | 127 - ...ixed-cipher-suites-listeners.latest.golden | 55 - ...ess-with-tls-mixed-listeners.latest.golden | 55 - ...-mixed-max-version-listeners.latest.golden | 79 - ...-mixed-min-version-listeners.latest.golden | 79 - 
...-balance-inbound-connections.latest.golden | 5 - ...tbound-connections-bind-port.latest.golden | 5 - .../listener-bind-address-port.latest.golden | 5 - .../listener-bind-address.latest.golden | 5 - .../routes/listener-bind-port.latest.golden | 5 - ...ener-max-inbound-connections.latest.golden | 5 - .../listener-unix-domain-socket.latest.golden | 5 - ...ateway-with-peered-upstreams.latest.golden | 4 +- ...esh-gateway-custom-addresses.latest.golden | 5 - ...teway-default-service-subset.latest.golden | 5 - ...mesh-gateway-hash-lb-ignored.latest.golden | 5 - ...teway-ignore-extra-resolvers.latest.golden | 5 - ...rmation-in-federation-states.latest.golden | 5 - .../mesh-gateway-no-services.latest.golden | 5 - ...gateway-non-hash-lb-injected.latest.golden | 5 - ...rmation-in-federation-states.latest.golden | 5 - ...ateway-peering-control-plane.latest.golden | 4 +- ...mesh-gateway-service-subsets.latest.golden | 5 - ...esh-gateway-service-subsets2.latest.golden | 5 - ...esh-gateway-service-timeouts.latest.golden | 5 - ...esh-gateway-tagged-addresses.latest.golden | 5 - .../mesh-gateway-tcp-keepalives.latest.golden | 5 - ...ing-federation-control-plane.latest.golden | 5 - ...eway-using-federation-states.latest.golden | 5 - ...ed-services-http-with-router.latest.golden | 10 +- ...xported-peered-services-http.latest.golden | 22 +- ...ith-exported-peered-services.latest.golden | 4 +- ...ith-imported-peered-services.latest.golden | 4 +- ...through-mesh-gateway-enabled.latest.golden | 4 +- .../routes/mesh-gateway.latest.golden | 5 - ...itter-with-resolver-redirect.latest.golden | 10 +- .../routes/telemetry-collector.latest.golden | 6 +- ...-custom-and-tagged-addresses.latest.golden | 5 - ...ateway-custom-trace-listener.latest.golden | 5 - ...teway-default-service-subset.latest.golden | 5 - ...way-hostname-service-subsets.latest.golden | 103 - ...teway-http2-upstream-subsets.latest.golden | 5 - ...ating-gateway-http2-upstream.latest.golden | 5 - ...teway-ignore-extra-resolvers.latest.golden | 79 - ...y-lb-config-no-hash-policies.latest.golden | 82 - ...erminating-gateway-lb-config.latest.golden | 37 +- ...minating-gateway-no-api-cert.latest.golden | 5 - ...minating-gateway-no-services.latest.golden | 5 - ...ting-gateway-service-subsets.latest.golden | 79 - .../terminating-gateway-sni.latest.golden | 5 - ...ating-gateway-tcp-keepalives.latest.golden | 5 - ...teway-with-peer-trust-bundle.latest.golden | 5 - ...h-tls-incoming-cipher-suites.latest.golden | 5 - ...ith-tls-incoming-max-version.latest.golden | 5 - ...ith-tls-incoming-min-version.latest.golden | 5 - .../routes/terminating-gateway.latest.golden | 5 - ...xy-catalog-destinations-only.latest.golden | 5 - ...arent-proxy-destination-http.latest.golden | 20 +- ...ransparent-proxy-destination.latest.golden | 4 +- ...roxy-dial-instances-directly.latest.golden | 5 - ...nsparent-proxy-http-upstream.latest.golden | 5 - ...ng-gateway-destinations-only.latest.golden | 16 +- ...nt-proxy-terminating-gateway.latest.golden | 5 - ...-proxy-with-peered-upstreams.latest.golden | 4 +- ...h-resolver-redirect-upstream.latest.golden | 5 - .../routes/transparent-proxy.latest.golden | 4 +- ...meout-ms-ingress-with-router.latest.golden | 473 +- ...xds-fetch-timeout-ms-sidecar.latest.golden | 473 +- ...meout-ms-tproxy-http-peering.latest.golden | 5 - .../access-logs-defaults.latest.golden | 5 - .../access-logs-json-file.latest.golden | 5 - ...t-stderr-disablelistenerlogs.latest.golden | 5 - ...ttp-listener-with-http-route.latest.golden | 5 - 
.../api-gateway-http-listener.latest.golden | 5 - ...api-gateway-nil-config-entry.latest.golden | 5 - ...ener-with-tcp-and-http-route.latest.golden | 5 - ...-tcp-listener-with-tcp-route.latest.golden | 5 - .../api-gateway-tcp-listener.latest.golden | 5 - ...-route-timeoutfilter-one-set.latest.golden | 5 - .../api-gateway-with-http-route.latest.golden | 5 - ...eway-with-multiple-hostnames.latest.golden | 5 - ...multiple-inline-certificates.latest.golden | 5 - ...route-and-inline-certificate.latest.golden | 4 +- .../secrets/api-gateway.latest.golden | 5 - ...nect-proxy-exported-to-peers.latest.golden | 6 +- ...connect-proxy-lb-in-resolver.latest.golden | 5 - ...nnect-proxy-resolver-with-lb.latest.golden | 5 - ...t-proxy-route-to-lb-resolver.latest.golden | 5 - ...ct-proxy-splitter-overweight.latest.golden | 5 - ...nect-proxy-upstream-defaults.latest.golden | 5 - ...and-failover-to-cluster-peer.latest.golden | 6 +- ...roxy-with-chain-and-failover.latest.golden | 5 - ...oxy-with-chain-and-overrides.latest.golden | 5 - ...and-redirect-to-cluster-peer.latest.golden | 6 +- ...-proxy-with-chain-and-router.latest.golden | 5 - ...roxy-with-chain-and-splitter.latest.golden | 5 - ...roxy-with-chain-external-sni.latest.golden | 5 - ...nnect-proxy-with-chain-http2.latest.golden | 5 - .../connect-proxy-with-chain.latest.golden | 5 - ...ult-chain-and-custom-cluster.latest.golden | 5 - ...onnect-proxy-with-grpc-chain.latest.golden | 5 - ...nnect-proxy-with-grpc-router.latest.golden | 5 - ...onnect-proxy-with-http-chain.latest.golden | 5 - ...nnect-proxy-with-http2-chain.latest.golden | 5 - ...-jwt-config-entry-with-local.latest.golden | 5 - ...onfig-entry-with-remote-jwks.latest.golden | 5 - ...d-upstreams-escape-overrides.latest.golden | 5 - ...-with-peered-upstreams-http2.latest.golden | 5 - ...-upstreams-listener-override.latest.golden | 5 - ...-proxy-with-peered-upstreams.latest.golden | 6 +- ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...connect-proxy-with-tcp-chain.latest.golden | 5 - ...h-tls-incoming-cipher-suites.latest.golden | 5 - ...ith-tls-incoming-max-version.latest.golden | 5 - ...ith-tls-incoming-min-version.latest.golden | 5 - ...h-tls-outgoing-cipher-suites.latest.golden | 5 - ...ith-tls-outgoing-max-version.latest.golden | 5 - ...ls-outgoing-min-version-auto.latest.golden | 5 - ...ith-tls-outgoing-min-version.latest.golden | 5 - ...h-tproxy-and-permissive-mtls.latest.golden | 5 - ...t-tproxy-and-permissive-mtls.latest.golden | 5 - ...-limits-max-connections-only.latest.golden | 5 - .../custom-limits-set-to-zero.latest.golden | 5 - .../secrets/custom-limits.latest.golden | 5 - .../secrets/custom-local-app.latest.golden | 5 - ...stom-max-inbound-connections.latest.golden | 5 - ...thcheck-zero-consecutive_5xx.latest.golden | 5 - .../custom-passive-healthcheck.latest.golden | 5 - ...ustom-public-listener-http-2.latest.golden | 5 - ...public-listener-http-missing.latest.golden | 5 - .../custom-public-listener-http.latest.golden | 5 - .../custom-public-listener.latest.golden | 5 - .../secrets/custom-timeouts.latest.golden | 5 - .../custom-trace-listener.latest.golden | 5 - ...ustom-upstream-default-chain.latest.golden | 5 - 
...eam-ignored-with-disco-chain.latest.golden | 5 - ...upstream-with-prepared-query.latest.golden | 5 - .../secrets/custom-upstream.latest.golden | 5 - .../testdata/secrets/defaults.latest.golden | 6 +- ...am-service-with-unix-sockets.latest.golden | 5 - .../secrets/expose-checks-grpc.latest.golden | 5 - ...ecks-http-with-bind-override.latest.golden | 5 - .../secrets/expose-checks-http.latest.golden | 5 - .../secrets/expose-checks.latest.golden | 5 - ...paths-grpc-new-cluster-http1.latest.golden | 5 - ...expose-paths-local-app-paths.latest.golden | 5 - ...pose-paths-new-cluster-http2.latest.golden | 5 - .../grpc-public-listener.latest.golden | 5 - .../http-listener-with-timeouts.latest.golden | 5 - ...http-public-listener-no-xfcc.latest.golden | 5 - .../http-public-listener.latest.golden | 5 - .../secrets/http-upstream.latest.golden | 5 - .../http2-public-listener.latest.golden | 5 - .../ingress-config-entry-nil.latest.golden | 5 - .../ingress-defaults-no-chain.latest.golden | 5 - .../ingress-gateway-bind-addrs.latest.golden | 5 - ...ess-gateway-nil-config-entry.latest.golden | 5 - .../ingress-gateway-no-services.latest.golden | 5 - ...h-tls-outgoing-cipher-suites.latest.golden | 5 - ...ith-tls-outgoing-max-version.latest.golden | 5 - ...ith-tls-outgoing-min-version.latest.golden | 5 - .../secrets/ingress-gateway.latest.golden | 5 - ...gress-grpc-multiple-services.latest.golden | 5 - ...gress-http-multiple-services.latest.golden | 5 - .../ingress-lb-in-resolver.latest.golden | 5 - ...-listeners-duplicate-service.latest.golden | 5 - ...itter-with-resolver-redirect.latest.golden | 5 - ...and-failover-to-cluster-peer.latest.golden | 5 - ...ress-with-chain-and-failover.latest.golden | 5 - ...hain-and-router-header-manip.latest.golden | 5 - ...ngress-with-chain-and-router.latest.golden | 5 - ...ress-with-chain-and-splitter.latest.golden | 5 - ...ress-with-chain-external-sni.latest.golden | 5 - .../secrets/ingress-with-chain.latest.golden | 5 - ...efaults-passive-health-check.latest.golden | 5 - ...ults-service-max-connections.latest.golden | 5 - .../ingress-with-grpc-router.latest.golden | 5 - ...ith-grpc-single-tls-listener.latest.golden | 5 - ...d-grpc-multiple-tls-listener.latest.golden | 5 - ...th-http2-single-tls-listener.latest.golden | 5 - ...efaults-passive-health-check.latest.golden | 5 - ...ults-service-max-connections.latest.golden | 5 - ...h-sds-listener+service-level.latest.golden | 5 - ...h-sds-listener-gw-level-http.latest.golden | 5 - ...-listener-gw-level-mixed-tls.latest.golden | 5 - ...s-with-sds-listener-gw-level.latest.golden | 5 - ...-sds-listener-level-wildcard.latest.golden | 5 - ...ress-with-sds-listener-level.latest.golden | 5 - ...-sds-listener-listener-level.latest.golden | 5 - ...ess-with-sds-service-level-2.latest.golden | 5 - ...s-service-level-mixed-no-tls.latest.golden | 5 - ...-sds-service-level-mixed-tls.latest.golden | 5 - ...gress-with-sds-service-level.latest.golden | 5 - ...with-service-max-connections.latest.golden | 5 - ...service-passive-health-check.latest.golden | 5 - ...ess-with-single-tls-listener.latest.golden | 5 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - ...ough-local-gateway-triggered.latest.golden | 5 - ...ilover-through-local-gateway.latest.golden | 5 - ...ugh-remote-gateway-triggered.latest.golden | 5 - ...lover-through-remote-gateway.latest.golden | 5 - 
...h-tls-listener-cipher-suites.latest.golden | 5 - ...ith-tls-listener-max-version.latest.golden | 5 - ...ith-tls-listener-min-version.latest.golden | 5 - .../ingress-with-tls-listener.latest.golden | 5 - ...n-listeners-gateway-defaults.latest.golden | 5 - ...ixed-cipher-suites-listeners.latest.golden | 5 - ...ess-with-tls-mixed-listeners.latest.golden | 5 - ...-mixed-max-version-listeners.latest.golden | 5 - ...-mixed-min-version-listeners.latest.golden | 5 - ...-balance-inbound-connections.latest.golden | 5 - ...tbound-connections-bind-port.latest.golden | 5 - .../listener-bind-address-port.latest.golden | 5 - .../listener-bind-address.latest.golden | 5 - .../secrets/listener-bind-port.latest.golden | 5 - ...ener-max-inbound-connections.latest.golden | 5 - .../listener-unix-domain-socket.latest.golden | 5 - ...ateway-with-peered-upstreams.latest.golden | 6 +- ...esh-gateway-custom-addresses.latest.golden | 5 - ...teway-default-service-subset.latest.golden | 5 - ...mesh-gateway-hash-lb-ignored.latest.golden | 5 - ...teway-ignore-extra-resolvers.latest.golden | 5 - ...rmation-in-federation-states.latest.golden | 5 - .../mesh-gateway-no-services.latest.golden | 5 - ...gateway-non-hash-lb-injected.latest.golden | 5 - ...rmation-in-federation-states.latest.golden | 5 - ...ateway-peering-control-plane.latest.golden | 6 +- ...mesh-gateway-service-subsets.latest.golden | 5 - ...esh-gateway-service-subsets2.latest.golden | 5 - ...esh-gateway-service-timeouts.latest.golden | 5 - ...esh-gateway-tagged-addresses.latest.golden | 5 - .../mesh-gateway-tcp-keepalives.latest.golden | 5 - ...ing-federation-control-plane.latest.golden | 5 - ...eway-using-federation-states.latest.golden | 5 - ...ed-services-http-with-router.latest.golden | 6 +- ...xported-peered-services-http.latest.golden | 6 +- ...ith-exported-peered-services.latest.golden | 6 +- ...ith-imported-peered-services.latest.golden | 6 +- ...through-mesh-gateway-enabled.latest.golden | 6 +- .../secrets/mesh-gateway.latest.golden | 5 - ...itter-with-resolver-redirect.latest.golden | 5 - .../secrets/telemetry-collector.latest.golden | 6 +- ...-custom-and-tagged-addresses.latest.golden | 5 - ...ateway-custom-trace-listener.latest.golden | 5 - ...teway-default-service-subset.latest.golden | 5 - ...way-hostname-service-subsets.latest.golden | 5 - ...teway-http2-upstream-subsets.latest.golden | 5 - ...ating-gateway-http2-upstream.latest.golden | 5 - ...teway-ignore-extra-resolvers.latest.golden | 5 - ...y-lb-config-no-hash-policies.latest.golden | 5 - ...erminating-gateway-lb-config.latest.golden | 5 - ...minating-gateway-no-api-cert.latest.golden | 5 - ...minating-gateway-no-services.latest.golden | 5 - ...ting-gateway-service-subsets.latest.golden | 5 - .../terminating-gateway-sni.latest.golden | 5 - ...ating-gateway-tcp-keepalives.latest.golden | 5 - ...teway-with-peer-trust-bundle.latest.golden | 5 - ...h-tls-incoming-cipher-suites.latest.golden | 5 - ...ith-tls-incoming-max-version.latest.golden | 5 - ...ith-tls-incoming-min-version.latest.golden | 5 - .../secrets/terminating-gateway.latest.golden | 5 - ...xy-catalog-destinations-only.latest.golden | 5 - ...arent-proxy-destination-http.latest.golden | 6 +- ...ransparent-proxy-destination.latest.golden | 6 +- ...roxy-dial-instances-directly.latest.golden | 5 - ...nsparent-proxy-http-upstream.latest.golden | 5 - ...ng-gateway-destinations-only.latest.golden | 6 +- ...nt-proxy-terminating-gateway.latest.golden | 5 - ...-proxy-with-peered-upstreams.latest.golden | 6 +- 
...h-resolver-redirect-upstream.latest.golden | 5 - .../secrets/transparent-proxy.latest.golden | 6 +- ...meout-ms-tproxy-http-peering.latest.golden | 5 - agent/xds/testing.go | 2 +- .../validateupstream_test.go | 2 +- agent/xds/xds.go | 2 +- agent/xds/xds_protocol_helpers_test.go | 51 +- agent/xds/z_xds_packages_test.go | 2 +- agent/xdsv2/cluster_resources.go | 405 - agent/xdsv2/endpoint_resources.go | 46 - agent/xdsv2/listener_resources.go | 1142 -- agent/xdsv2/rbac_resources.go | 486 - agent/xdsv2/resources.go | 91 - agent/xdsv2/resources_test.go | 182 - agent/xdsv2/route_resources.go | 546 - ...cit-destinations-tproxy-default-bar.golden | 116 - ...destinations-tproxy-default-default.golden | 116 - ...xplicit-destinations-tproxy-foo-bar.golden | 116 - ...cit-destinations-tproxy-foo-default.golden | 116 - .../l4-multi-destination-default-bar.golden | 217 - ...4-multi-destination-default-default.golden | 217 - .../l4-multi-destination-foo-bar.golden | 217 - .../l4-multi-destination-foo-default.golden | 217 - ...cit-destinations-tproxy-default-bar.golden | 116 - ...destinations-tproxy-default-default.golden | 116 - ...mplicit-destinations-tproxy-foo-bar.golden | 116 - ...cit-destinations-tproxy-foo-default.golden | 116 - ...on-ip-port-bind-address-default-bar.golden | 115 - ...p-port-bind-address-default-default.golden | 115 - ...nation-ip-port-bind-address-foo-bar.golden | 115 - ...on-ip-port-bind-address-foo-default.golden | 115 - ...nix-socket-bind-address-default-bar.golden | 58 - ...socket-bind-address-default-default.golden | 58 - ...on-unix-socket-bind-address-foo-bar.golden | 58 - ...nix-socket-bind-address-foo-default.golden | 58 - ...icit-destination-tproxy-default-bar.golden | 65 - ...-destination-tproxy-default-default.golden | 65 - ...implicit-destination-tproxy-foo-bar.golden | 65 - ...icit-destination-tproxy-foo-default.golden | 65 - ...mixed-multi-destination-default-bar.golden | 285 - ...d-multi-destination-default-default.golden | 285 - .../mixed-multi-destination-foo-bar.golden | 285 - ...mixed-multi-destination-foo-default.golden | 285 - ...cit-destinations-tproxy-default-bar.golden | 320 - ...destinations-tproxy-default-default.golden | 320 - ...mplicit-destinations-tproxy-foo-bar.golden | 320 - ...cit-destinations-tproxy-foo-default.golden | 320 - ...icit-destination-tproxy-default-bar.golden | 167 - ...-destination-tproxy-default-default.golden | 167 - ...implicit-destination-tproxy-foo-bar.golden | 167 - ...icit-destination-tproxy-foo-default.golden | 167 - ...ltiple-workloads-tproxy-default-bar.golden | 167 - ...le-workloads-tproxy-default-default.golden | 167 - ...h-multiple-workloads-tproxy-foo-bar.golden | 167 - ...ltiple-workloads-tproxy-foo-default.golden | 167 - .../source/l7-expose-paths-default-bar.golden | 87 - .../l7-expose-paths-default-default.golden | 87 - .../source/l7-expose-paths-foo-bar.golden | 87 - .../source/l7-expose-paths-foo-default.golden | 87 - ...and-inbound-connections-default-bar.golden | 127 - ...inbound-connections-default-default.golden | 127 - ...cal-and-inbound-connections-foo-bar.golden | 127 - ...and-inbound-connections-foo-default.golden | 127 - ...ses-with-specific-ports-default-bar.golden | 119 - ...with-specific-ports-default-default.golden | 119 - ...dresses-with-specific-ports-foo-bar.golden | 119 - ...ses-with-specific-ports-foo-default.golden | 119 - ...addresses-without-ports-default-bar.golden | 119 - ...esses-without-ports-default-default.golden | 119 - ...oad-addresses-without-ports-foo-bar.golden | 119 - 
...addresses-without-ports-foo-default.golden | 119 - ...ses-with-specific-ports-default-bar.golden | 55 - ...with-specific-ports-default-default.golden | 55 - ...dresses-with-specific-ports-foo-bar.golden | 55 - ...ses-with-specific-ports-foo-default.golden | 55 - ...addresses-without-ports-default-bar.golden | 55 - ...esses-without-ports-default-default.golden | 55 - ...oad-addresses-without-ports-foo-bar.golden | 55 - ...addresses-without-ports-foo-default.golden | 55 - ...oad-with-only-mesh-port-default-bar.golden | 12 - ...with-only-mesh-port-default-default.golden | 12 - ...orkload-with-only-mesh-port-foo-bar.golden | 12 - ...oad-with-only-mesh-port-foo-default.golden | 12 - ...ses-with-specific-ports-default-bar.golden | 63 - ...with-specific-ports-default-default.golden | 63 - ...dresses-with-specific-ports-foo-bar.golden | 63 - ...ses-with-specific-ports-foo-default.golden | 63 - ...addresses-without-ports-default-bar.golden | 95 - ...esses-without-ports-default-default.golden | 95 - ...oad-addresses-without-ports-foo-bar.golden | 95 - ...addresses-without-ports-foo-default.golden | 95 - ...d-address-without-ports-default-bar.golden | 119 - ...dress-without-ports-default-default.golden | 119 - ...kload-address-without-ports-foo-bar.golden | 119 - ...d-address-without-ports-foo-default.golden | 119 - ...destinations-tproxy-default-default.golden | 49 - ...xplicit-destinations-tproxy-foo-bar.golden | 49 - ...cit-destinations-tproxy-foo-default.golden | 49 - .../l4-multi-destination-default-bar.golden | 91 - ...4-multi-destination-default-default.golden | 91 - .../l4-multi-destination-foo-bar.golden | 91 - .../l4-multi-destination-foo-default.golden | 91 - ...cit-destinations-tproxy-default-bar.golden | 49 - ...destinations-tproxy-default-default.golden | 49 - ...mplicit-destinations-tproxy-foo-bar.golden | 49 - ...cit-destinations-tproxy-foo-default.golden | 49 - ...on-ip-port-bind-address-default-bar.golden | 49 - ...p-port-bind-address-default-default.golden | 49 - ...nation-ip-port-bind-address-foo-bar.golden | 49 - ...on-ip-port-bind-address-foo-default.golden | 49 - ...nix-socket-bind-address-default-bar.golden | 28 - ...socket-bind-address-default-default.golden | 28 - ...on-unix-socket-bind-address-foo-bar.golden | 28 - ...nix-socket-bind-address-foo-default.golden | 28 - ...icit-destination-tproxy-default-bar.golden | 28 - ...-destination-tproxy-default-default.golden | 28 - ...implicit-destination-tproxy-foo-bar.golden | 28 - ...icit-destination-tproxy-foo-default.golden | 28 - ...mixed-multi-destination-default-bar.golden | 91 - ...d-multi-destination-default-default.golden | 91 - .../mixed-multi-destination-foo-bar.golden | 91 - ...mixed-multi-destination-foo-default.golden | 91 - ...cit-destinations-tproxy-default-bar.golden | 133 - ...destinations-tproxy-default-default.golden | 133 - ...mplicit-destinations-tproxy-foo-bar.golden | 133 - ...cit-destinations-tproxy-foo-default.golden | 133 - ...icit-destination-tproxy-default-bar.golden | 70 - ...-destination-tproxy-default-default.golden | 70 - ...implicit-destination-tproxy-foo-bar.golden | 70 - ...icit-destination-tproxy-foo-default.golden | 70 - ...ltiple-workloads-tproxy-default-bar.golden | 70 - ...le-workloads-tproxy-default-default.golden | 70 - ...h-multiple-workloads-tproxy-foo-bar.golden | 70 - ...ltiple-workloads-tproxy-foo-default.golden | 70 - .../source/l7-expose-paths-default-bar.golden | 5 - .../l7-expose-paths-default-default.golden | 5 - .../source/l7-expose-paths-foo-bar.golden | 5 - 
.../source/l7-expose-paths-foo-default.golden | 5 - ...and-inbound-connections-default-bar.golden | 5 - ...inbound-connections-default-default.golden | 5 - ...cal-and-inbound-connections-foo-bar.golden | 5 - ...and-inbound-connections-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...oad-with-only-mesh-port-default-bar.golden | 5 - ...with-only-mesh-port-default-default.golden | 5 - ...orkload-with-only-mesh-port-foo-bar.golden | 5 - ...oad-with-only-mesh-port-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...d-address-without-ports-default-bar.golden | 5 - ...dress-without-ports-default-default.golden | 5 - ...kload-address-without-ports-foo-bar.golden | 5 - ...d-address-without-ports-foo-default.golden | 5 - ...cit-destinations-tproxy-default-bar.golden | 90 - ...destinations-tproxy-default-default.golden | 90 - ...xplicit-destinations-tproxy-foo-bar.golden | 90 - ...cit-destinations-tproxy-foo-default.golden | 90 - .../l4-multi-destination-default-bar.golden | 137 - ...4-multi-destination-default-default.golden | 137 - .../l4-multi-destination-foo-bar.golden | 137 - .../l4-multi-destination-foo-default.golden | 137 - ...cit-destinations-tproxy-default-bar.golden | 86 - ...destinations-tproxy-default-default.golden | 86 - ...mplicit-destinations-tproxy-foo-bar.golden | 86 - ...cit-destinations-tproxy-foo-default.golden | 86 - ...on-ip-port-bind-address-default-bar.golden | 47 - ...p-port-bind-address-default-default.golden | 47 - ...nation-ip-port-bind-address-foo-bar.golden | 47 - ...on-ip-port-bind-address-foo-default.golden | 47 - ...nix-socket-bind-address-default-bar.golden | 32 - ...socket-bind-address-default-default.golden | 32 - ...on-unix-socket-bind-address-foo-bar.golden | 32 - ...nix-socket-bind-address-foo-default.golden | 32 - ...icit-destination-tproxy-default-bar.golden | 61 - ...-destination-tproxy-default-default.golden | 61 - ...implicit-destination-tproxy-foo-bar.golden | 61 - ...icit-destination-tproxy-foo-default.golden | 61 - ...mixed-multi-destination-default-bar.golden | 119 - ...d-multi-destination-default-default.golden | 119 - .../mixed-multi-destination-foo-bar.golden | 119 - ...mixed-multi-destination-foo-default.golden | 119 - ...cit-destinations-tproxy-default-bar.golden | 222 - ...destinations-tproxy-default-default.golden | 222 - 
...mplicit-destinations-tproxy-foo-bar.golden | 222 - ...cit-destinations-tproxy-foo-default.golden | 222 - ...icit-destination-tproxy-default-bar.golden | 125 - ...-destination-tproxy-default-default.golden | 125 - ...implicit-destination-tproxy-foo-bar.golden | 125 - ...icit-destination-tproxy-foo-default.golden | 125 - ...ltiple-workloads-tproxy-default-bar.golden | 125 - ...le-workloads-tproxy-default-default.golden | 125 - ...h-multiple-workloads-tproxy-foo-bar.golden | 125 - ...ltiple-workloads-tproxy-foo-default.golden | 125 - .../source/l7-expose-paths-default-bar.golden | 201 - .../l7-expose-paths-default-default.golden | 201 - .../source/l7-expose-paths-foo-bar.golden | 201 - .../source/l7-expose-paths-foo-default.golden | 201 - ...and-inbound-connections-default-bar.golden | 309 - ...inbound-connections-default-default.golden | 309 - ...cal-and-inbound-connections-foo-bar.golden | 309 - ...and-inbound-connections-foo-default.golden | 309 - ...ses-with-specific-ports-default-bar.golden | 443 - ...with-specific-ports-default-default.golden | 443 - ...dresses-with-specific-ports-foo-bar.golden | 443 - ...ses-with-specific-ports-foo-default.golden | 443 - ...addresses-without-ports-default-bar.golden | 359 - ...esses-without-ports-default-default.golden | 359 - ...oad-addresses-without-ports-foo-bar.golden | 359 - ...addresses-without-ports-foo-default.golden | 359 - ...ses-with-specific-ports-default-bar.golden | 128 - ...with-specific-ports-default-default.golden | 128 - ...dresses-with-specific-ports-foo-bar.golden | 128 - ...ses-with-specific-ports-foo-default.golden | 128 - ...addresses-without-ports-default-bar.golden | 128 - ...esses-without-ports-default-default.golden | 128 - ...oad-addresses-without-ports-foo-bar.golden | 128 - ...addresses-without-ports-foo-default.golden | 128 - ...oad-with-only-mesh-port-default-bar.golden | 40 - ...with-only-mesh-port-default-default.golden | 40 - ...orkload-with-only-mesh-port-foo-bar.golden | 40 - ...oad-with-only-mesh-port-foo-default.golden | 40 - ...ses-with-specific-ports-default-bar.golden | 206 - ...with-specific-ports-default-default.golden | 206 - ...dresses-with-specific-ports-foo-bar.golden | 206 - ...ses-with-specific-ports-foo-default.golden | 206 - ...addresses-without-ports-default-bar.golden | 309 - ...esses-without-ports-default-default.golden | 309 - ...oad-addresses-without-ports-foo-bar.golden | 309 - ...addresses-without-ports-foo-default.golden | 309 - ...d-address-without-ports-default-bar.golden | 359 - ...dress-without-ports-default-default.golden | 359 - ...kload-address-without-ports-foo-bar.golden | 359 - ...d-address-without-ports-foo-default.golden | 359 - ...cit-destinations-tproxy-default-bar.golden | 5 - ...destinations-tproxy-default-default.golden | 5 - ...xplicit-destinations-tproxy-foo-bar.golden | 5 - ...cit-destinations-tproxy-foo-default.golden | 5 - .../l4-multi-destination-default-bar.golden | 5 - ...4-multi-destination-default-default.golden | 5 - .../l4-multi-destination-foo-bar.golden | 5 - .../l4-multi-destination-foo-default.golden | 5 - ...cit-destinations-tproxy-default-bar.golden | 5 - ...destinations-tproxy-default-default.golden | 5 - ...mplicit-destinations-tproxy-foo-bar.golden | 5 - ...cit-destinations-tproxy-foo-default.golden | 5 - ...on-ip-port-bind-address-default-bar.golden | 5 - ...p-port-bind-address-default-default.golden | 5 - ...nation-ip-port-bind-address-foo-bar.golden | 5 - ...on-ip-port-bind-address-foo-default.golden | 5 - 
...nix-socket-bind-address-default-bar.golden | 5 - ...socket-bind-address-default-default.golden | 5 - ...on-unix-socket-bind-address-foo-bar.golden | 5 - ...nix-socket-bind-address-foo-default.golden | 5 - ...icit-destination-tproxy-default-bar.golden | 5 - ...-destination-tproxy-default-default.golden | 5 - ...implicit-destination-tproxy-foo-bar.golden | 5 - ...icit-destination-tproxy-foo-default.golden | 5 - ...mixed-multi-destination-default-bar.golden | 67 - ...d-multi-destination-default-default.golden | 67 - .../mixed-multi-destination-foo-bar.golden | 67 - ...mixed-multi-destination-foo-default.golden | 67 - ...cit-destinations-tproxy-default-bar.golden | 53 - ...destinations-tproxy-default-default.golden | 53 - ...mplicit-destinations-tproxy-foo-bar.golden | 53 - ...cit-destinations-tproxy-foo-default.golden | 53 - ...icit-destination-tproxy-default-bar.golden | 30 - ...-destination-tproxy-default-default.golden | 30 - ...implicit-destination-tproxy-foo-bar.golden | 30 - ...icit-destination-tproxy-foo-default.golden | 30 - ...ltiple-workloads-tproxy-default-bar.golden | 30 - ...le-workloads-tproxy-default-default.golden | 30 - ...h-multiple-workloads-tproxy-foo-bar.golden | 30 - ...ltiple-workloads-tproxy-foo-default.golden | 30 - .../source/l7-expose-paths-default-bar.golden | 5 - .../l7-expose-paths-default-default.golden | 5 - .../source/l7-expose-paths-foo-bar.golden | 5 - .../source/l7-expose-paths-foo-default.golden | 5 - ...and-inbound-connections-default-bar.golden | 5 - ...inbound-connections-default-default.golden | 5 - ...cal-and-inbound-connections-foo-bar.golden | 5 - ...and-inbound-connections-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...oad-with-only-mesh-port-default-bar.golden | 5 - ...with-only-mesh-port-default-default.golden | 5 - ...orkload-with-only-mesh-port-foo-bar.golden | 5 - ...oad-with-only-mesh-port-foo-default.golden | 5 - ...ses-with-specific-ports-default-bar.golden | 5 - ...with-specific-ports-default-default.golden | 5 - ...dresses-with-specific-ports-foo-bar.golden | 5 - ...ses-with-specific-ports-foo-default.golden | 5 - ...addresses-without-ports-default-bar.golden | 5 - ...esses-without-ports-default-default.golden | 5 - ...oad-addresses-without-ports-foo-bar.golden | 5 - ...addresses-without-ports-foo-default.golden | 5 - ...d-address-without-ports-default-bar.golden | 5 - ...dress-without-ports-default-default.golden | 5 - ...kload-address-without-ports-foo-bar.golden | 5 - ...d-address-without-ports-foo-default.golden | 5 - api/LICENSE | 365 - api/acl.go | 119 - api/agent.go | 18 - api/agent_test.go | 59 +- api/api_test.go | 2 +- api/ce_test.go | 1 + api/config_entry.go | 54 +- api/config_entry_discoverychain.go | 7 +- 
api/config_entry_discoverychain_test.go | 62 - api/config_entry_exports.go | 4 +- api/config_entry_gateways.go | 40 - api/config_entry_gateways_test.go | 147 - api/config_entry_routes.go | 40 +- api/config_entry_routes_test.go | 137 - api/config_entry_status.go | 19 - api/exported_services.go | 49 - api/go.mod | 4 +- api/go.sum | 4 +- api/lock_test.go | 5 +- api/namespace_test.go | 1 + api/operator_raft.go | 7 +- api/operator_raft_test.go | 187 +- api/watch/funcs_test.go | 104 - buf.work.yaml | 2 +- build-support/docker/Build-Go.dockerfile | 2 +- build-support/docker/Build-UI.dockerfile | 5 +- .../docker/Consul-Dev-Dbg.dockerfile | 2 +- .../docker/Consul-Dev-Multiarch.dockerfile | 2 +- build-support/docker/Consul-Dev.dockerfile | 2 +- build-support/functions/00-vars.sh | 2 +- build-support/functions/10-util.sh | 2 +- build-support/functions/20-build.sh | 2 +- build-support/functions/30-release.sh | 2 +- build-support/scripts/build-date.sh | 2 +- build-support/scripts/build-docker.sh | 2 +- .../scripts/check-allowed-imports.sh | 124 - build-support/scripts/devtools.sh | 104 +- .../scripts/envoy-library-references.sh | 2 +- build-support/scripts/functions.sh | 2 +- build-support/scripts/protobuf.sh | 12 +- build-support/scripts/release.sh | 2 +- build-support/scripts/version.sh | 2 +- .../windows/Dockerfile-consul-dev-windows | 4 - .../windows/Dockerfile-consul-local-windows | 52 - .../windows/Dockerfile-openzipkin-windows | 12 - .../windows/build-consul-dev-image.sh | 8 - .../windows/build-consul-local-images.sh | 95 - .../windows/build-test-sds-server-image.sh | 8 - build-support/windows/windows-test.md | 119 - command/acl/acl.go | 2 +- command/acl/acl_helpers.go | 78 +- command/acl/acl_test.go | 2 +- command/acl/agenttokens/agent_tokens.go | 12 +- command/acl/agenttokens/agent_tokens_test.go | 2 +- command/acl/authmethod/authmethod.go | 2 +- .../authmethod/create/authmethod_create.go | 2 +- .../authmethod/create/authmethod_create_ce.go | 3 +- .../create/authmethod_create_test.go | 2 +- .../authmethod/delete/authmethod_delete.go | 2 +- .../delete/authmethod_delete_test.go | 2 +- command/acl/authmethod/formatter.go | 2 +- .../acl/authmethod/list/authmethod_list.go | 2 +- .../authmethod/list/authmethod_list_test.go | 2 +- .../acl/authmethod/read/authmethod_read.go | 2 +- .../authmethod/read/authmethod_read_test.go | 2 +- .../authmethod/update/authmethod_update.go | 2 +- .../authmethod/update/authmethod_update_ce.go | 3 +- .../update/authmethod_update_test.go | 2 +- command/acl/bindingrule/bindingrule.go | 2 +- .../bindingrule/create/bindingrule_create.go | 33 +- .../create/bindingrule_create_test.go | 55 +- .../bindingrule/delete/bindingrule_delete.go | 2 +- .../delete/bindingrule_delete_test.go | 2 +- command/acl/bindingrule/formatter.go | 9 +- .../acl/bindingrule/list/bindingrule_list.go | 2 +- .../bindingrule/list/bindingrule_list_test.go | 2 +- .../acl/bindingrule/read/bindingrule_read.go | 2 +- .../bindingrule/read/bindingrule_read_test.go | 2 +- .../bindingrule/update/bindingrule_update.go | 39 +- .../update/bindingrule_update_test.go | 260 +- command/acl/bootstrap/bootstrap.go | 2 +- command/acl/bootstrap/bootstrap_test.go | 2 +- command/acl/policy/create/policy_create.go | 4 +- .../acl/policy/create/policy_create_test.go | 2 +- command/acl/policy/delete/policy_delete.go | 2 +- .../acl/policy/delete/policy_delete_test.go | 2 +- command/acl/policy/formatter.go | 2 +- command/acl/policy/list/policy_list.go | 2 +- command/acl/policy/list/policy_list_test.go | 2 +- 
command/acl/policy/policy.go | 2 +- command/acl/policy/read/policy_read.go | 2 +- command/acl/policy/read/policy_read_test.go | 2 +- command/acl/policy/update/policy_update.go | 2 +- .../acl/policy/update/policy_update_test.go | 2 +- command/acl/role/create/role_create.go | 48 +- command/acl/role/create/role_create_test.go | 18 +- command/acl/role/delete/role_delete.go | 2 +- command/acl/role/delete/role_delete_test.go | 2 +- command/acl/role/formatter.go | 31 +- command/acl/role/formatter_test.go | 16 +- command/acl/role/list/role_list.go | 2 +- command/acl/role/list/role_list_test.go | 2 +- command/acl/role/read/role_read.go | 2 +- command/acl/role/read/role_read_test.go | 2 +- command/acl/role/role.go | 2 +- .../testdata/FormatRole/complex.json.golden | 18 - .../FormatRole/complex.pretty-meta.golden | 7 - .../testdata/FormatRole/complex.pretty.golden | 7 - .../FormatRoleList/complex.json.golden | 14 - .../FormatRoleList/complex.pretty-meta.golden | 7 - .../FormatRoleList/complex.pretty.golden | 7 - command/acl/role/update/role_update.go | 60 +- command/acl/role/update/role_update_test.go | 110 +- command/acl/templatedpolicy/formatter.go | 143 - .../acl/templatedpolicy/formatter_ce_test.go | 16 - command/acl/templatedpolicy/formatter_test.go | 140 - .../list/templated_policy_list.go | 101 - .../list/templated_policy_list_test.go | 102 - .../preview/templated_policy_preview.go | 134 - .../preview/templated_policy_preview_test.go | 204 - .../read/templated_policy_read.go | 119 - .../read/templated_policy_read_test.go | 134 - .../acl/templatedpolicy/templated_policy.go | 50 - .../ce/dns-templated-policy.json.golden | 6 - .../dns-templated-policy.pretty-meta.golden | 16 - .../ce/dns-templated-policy.pretty.golden | 5 - .../ce/node-templated-policy.json.golden | 6 - .../node-templated-policy.pretty-meta.golden | 29 - .../ce/node-templated-policy.pretty.golden | 6 - .../nomad-client-templated-policy.json.golden | 6 - ...client-templated-policy.pretty-meta.golden | 18 - ...omad-client-templated-policy.pretty.golden | 5 - .../nomad-server-templated-policy.json.golden | 6 - ...server-templated-policy.pretty-meta.golden | 17 - ...omad-server-templated-policy.pretty.golden | 5 - .../ce/service-templated-policy.json.golden | 6 - ...ervice-templated-policy.pretty-meta.golden | 35 - .../ce/service-templated-policy.pretty.golden | 6 - .../ce/list.json.golden | 20 - .../ce/list.pretty.golden | 3 - command/acl/token/clone/token_clone.go | 2 +- command/acl/token/clone/token_clone_test.go | 11 +- command/acl/token/create/token_create.go | 60 +- command/acl/token/create/token_create_test.go | 38 +- command/acl/token/delete/token_delete.go | 2 +- command/acl/token/delete/token_delete_test.go | 2 +- command/acl/token/formatter.go | 76 +- command/acl/token/formatter_ce_test.go | 3 +- command/acl/token/formatter_test.go | 47 +- command/acl/token/list/token_list.go | 2 +- command/acl/token/list/token_list_test.go | 2 +- command/acl/token/read/token_read.go | 2 +- command/acl/token/read/token_read_test.go | 2 +- .../testdata/FormatToken/complex.json.golden | 18 - .../FormatToken/complex.pretty-meta.golden | 7 - .../FormatToken/complex.pretty.golden | 7 - .../ce/complex.json.golden | 17 - .../ce/complex.pretty-meta.golden | 43 +- .../ce/complex.pretty.golden | 43 +- .../FormatTokenList/complex.json.golden | 17 - .../complex.pretty-meta.golden | 7 - .../FormatTokenList/complex.pretty.golden | 7 - command/acl/token/token.go | 2 +- command/acl/token/update/token_update.go | 80 +- 
command/acl/token/update/token_update_test.go | 47 +- command/agent/agent.go | 9 +- command/agent/agent_test.go | 2 +- command/agent/startup_logger.go | 2 +- command/catalog/catalog.go | 2 +- command/catalog/catalog_test.go | 2 +- command/catalog/helpers.go | 2 +- command/catalog/helpers_ce.go | 3 +- .../list/dc/catalog_list_datacenters.go | 2 +- .../list/dc/catalog_list_datacenters_test.go | 2 +- .../catalog/list/nodes/catalog_list_nodes.go | 2 +- .../list/nodes/catalog_list_nodes_test.go | 2 +- .../list/services/catalog_list_services.go | 2 +- .../services/catalog_list_services_test.go | 2 +- command/cli/cli.go | 2 +- command/cli/formatting.go | 2 +- command/config/config.go | 2 +- command/config/delete/config_delete.go | 2 +- command/config/delete/config_delete_test.go | 2 +- command/config/list/config_list.go | 2 +- command/config/list/config_list_test.go | 2 +- command/config/read/config_read.go | 2 +- command/config/read/config_read_test.go | 2 +- command/config/write/config_write.go | 68 +- command/config/write/config_write_test.go | 2 +- command/connect/ca/ca.go | 2 +- command/connect/ca/ca_test.go | 2 +- command/connect/ca/get/connect_ca_get.go | 2 +- command/connect/ca/get/connect_ca_get_test.go | 2 +- command/connect/ca/set/connect_ca_set.go | 2 +- command/connect/ca/set/connect_ca_set_test.go | 2 +- command/connect/connect.go | 2 +- command/connect/connect_test.go | 2 +- command/connect/envoy/bootstrap_config.go | 2 +- .../connect/envoy/bootstrap_config_test.go | 2 +- command/connect/envoy/bootstrap_tpl.go | 4 +- command/connect/envoy/envoy.go | 4 +- command/connect/envoy/envoy_ce_test.go | 3 +- command/connect/envoy/envoy_test.go | 2 +- command/connect/envoy/exec.go | 2 +- command/connect/envoy/exec_supported.go | 57 - command/connect/envoy/exec_test.go | 6 +- command/connect/envoy/exec_unix.go | 54 +- command/connect/envoy/exec_unsupported.go | 5 +- command/connect/envoy/exec_windows.go | 111 - command/connect/envoy/flags.go | 2 +- command/connect/envoy/flags_test.go | 2 +- .../connect_envoy_pipe-bootstrap.go | 2 +- .../connect_envoy_pipe-bootstrap_test.go | 2 +- command/connect/expose/expose.go | 2 +- command/connect/expose/expose_test.go | 2 +- command/connect/proxy/flag_upstreams.go | 2 +- command/connect/proxy/flag_upstreams_test.go | 2 +- command/connect/proxy/proxy.go | 2 +- command/connect/proxy/proxy_test.go | 2 +- command/connect/proxy/register.go | 2 +- command/connect/proxy/register_test.go | 2 +- .../redirecttraffic/redirect_traffic.go | 2 +- .../redirecttraffic/redirect_traffic_test.go | 2 +- command/debug/debug.go | 2 +- command/debug/debug_test.go | 2 +- command/event/event.go | 2 +- command/event/event_test.go | 2 +- command/exec/exec.go | 2 +- command/exec/exec_test.go | 2 +- command/flags/config.go | 2 +- command/flags/config_test.go | 2 +- command/flags/flag_map_value.go | 2 +- command/flags/flag_map_value_test.go | 2 +- command/flags/flag_slice_value.go | 2 +- command/flags/flag_slice_value_test.go | 2 +- command/flags/http.go | 13 +- command/flags/http_test.go | 2 +- command/flags/merge.go | 2 +- command/flags/usage.go | 2 +- command/forceleave/forceleave.go | 2 +- command/forceleave/forceleave_test.go | 2 +- command/helpers/decode_shim.go | 2 +- command/helpers/helpers.go | 24 +- command/helpers/helpers_test.go | 2 +- command/info/info.go | 2 +- command/info/info_test.go | 2 +- command/intention/check/check.go | 2 +- command/intention/check/check_test.go | 2 +- command/intention/create/create.go | 2 +- command/intention/create/create_test.go | 2 +- 
command/intention/delete/delete.go | 2 +- command/intention/delete/delete_test.go | 2 +- command/intention/format.go | 2 +- command/intention/get/get.go | 2 +- command/intention/get/get_test.go | 2 +- command/intention/helpers.go | 2 +- command/intention/helpers_test.go | 2 +- command/intention/intention.go | 2 +- command/intention/intention_test.go | 2 +- command/intention/list/intention_list.go | 2 +- command/intention/list/intention_list_test.go | 2 +- command/intention/match/match.go | 2 +- command/intention/match/match_test.go | 2 +- command/join/join.go | 2 +- command/join/join_test.go | 2 +- command/keygen/keygen.go | 2 +- command/keygen/keygen_test.go | 2 +- command/keyring/keyring.go | 2 +- command/keyring/keyring_test.go | 2 +- command/kv/del/kv_delete.go | 2 +- command/kv/del/kv_delete_test.go | 2 +- command/kv/exp/kv_export.go | 2 +- command/kv/exp/kv_export_test.go | 2 +- command/kv/get/kv_get.go | 2 +- command/kv/get/kv_get_test.go | 2 +- command/kv/imp/kv_import.go | 2 +- command/kv/imp/kv_import_test.go | 2 +- command/kv/impexp/kvimpexp.go | 2 +- command/kv/kv.go | 2 +- command/kv/kv_test.go | 2 +- command/kv/put/kv_put.go | 2 +- command/kv/put/kv_put_test.go | 2 +- command/leave/leave.go | 2 +- command/leave/leave_test.go | 2 +- command/lock/lock.go | 2 +- command/lock/lock_test.go | 2 +- command/lock/util_unix.go | 3 +- command/lock/util_windows.go | 3 +- command/login/aws.go | 2 +- command/login/login.go | 2 +- command/login/login_ce.go | 3 +- command/login/login_test.go | 2 +- command/logout/logout.go | 2 +- command/logout/logout_test.go | 2 +- command/maint/maint.go | 2 +- command/maint/maint_test.go | 2 +- command/members/members.go | 2 +- command/members/members_test.go | 2 +- command/monitor/monitor.go | 2 +- command/monitor/monitor_test.go | 2 +- .../autopilot/get/operator_autopilot_get.go | 2 +- .../get/operator_autopilot_get_test.go | 2 +- .../operator/autopilot/operator_autopilot.go | 2 +- .../autopilot/operator_autopilot_test.go | 2 +- .../autopilot/set/operator_autopilot_set.go | 2 +- .../set/operator_autopilot_set_test.go | 2 +- command/operator/autopilot/state/formatter.go | 2 +- .../state/operator_autopilot_state.go | 2 +- .../state/operator_autopilot_state_test.go | 2 +- command/operator/operator.go | 2 +- command/operator/operator_test.go | 2 +- .../raft/listpeers/operator_raft_list.go | 2 +- .../raft/listpeers/operator_raft_list_test.go | 2 +- command/operator/raft/operator_raft.go | 2 +- command/operator/raft/operator_raft_test.go | 2 +- .../raft/removepeer/operator_raft_remove.go | 2 +- .../removepeer/operator_raft_remove_test.go | 2 +- .../raft/transferleader/transfer_leader.go | 8 +- .../transferleader/transfer_leader_test.go | 2 +- .../usage/instances/usage_instances.go | 14 +- .../usage/instances/usage_instances_ce.go | 3 +- .../instances/usage_instances_ce_test.go | 3 +- .../usage/instances/usage_instances_test.go | 2 +- command/operator/usage/usage.go | 2 +- command/peering/delete/delete.go | 2 +- command/peering/delete/delete_test.go | 2 +- command/peering/establish/establish.go | 2 +- command/peering/establish/establish_test.go | 2 +- .../exportedservices/exported_services.go | 154 - .../exported_services_test.go | 216 - command/peering/generate/generate.go | 2 +- command/peering/generate/generate_test.go | 2 +- command/peering/list/list.go | 2 +- command/peering/list/list_test.go | 2 +- command/peering/peering.go | 6 +- command/peering/read/read.go | 2 +- command/peering/read/read_test.go | 2 +- command/registry.go | 24 +- 
command/registry_ce.go | 3 +- command/reload/reload.go | 2 +- command/reload/reload_test.go | 2 +- command/resource/apply/apply.go | 147 - command/resource/apply/apply_test.go | 243 - command/resource/client/client.go | 172 - command/resource/client/client_test.go | 147 - command/resource/client/config.go | 174 - command/resource/client/config_test.go | 60 - command/resource/client/grpc-flags.go | 90 - command/resource/client/grpc-flags_test.go | 104 - command/resource/client/helper.go | 299 - command/resource/client/helper_test.go | 95 - command/resource/client/resource-flags.go | 44 - command/resource/client/usage.go | 85 - command/resource/delete/delete.go | 160 - command/resource/delete/delete_test.go | 164 - command/resource/list/list.go | 189 - command/resource/list/list_test.go | 192 - command/resource/read/read.go | 168 - command/resource/read/read_test.go | 161 - command/resource/resource.go | 59 - command/resource/testdata/demo.hcl | 20 - command/resource/testdata/invalid.hcl | 20 - command/resource/testdata/invalid_type.hcl | 10 - command/resource/testdata/nested_data.hcl | 29 - command/rtt/rtt.go | 2 +- command/rtt/rtt_test.go | 2 +- command/services/config.go | 2 +- command/services/config_test.go | 2 +- command/services/deregister/deregister.go | 2 +- .../services/deregister/deregister_test.go | 2 +- .../exportedservices/exported_services.go | 176 - .../exported_services_test.go | 323 - command/services/register/register.go | 2 +- command/services/register/register_test.go | 12 +- command/services/services.go | 2 +- command/services/services_test.go | 2 +- command/snapshot/inspect/formatter.go | 2 +- command/snapshot/inspect/formatter_test.go | 2 +- command/snapshot/inspect/snapshot_inspect.go | 2 +- .../snapshot/inspect/snapshot_inspect_test.go | 2 +- command/snapshot/restore/snapshot_restore.go | 2 +- .../snapshot/restore/snapshot_restore_test.go | 2 +- command/snapshot/save/snapshot_save.go | 2 +- command/snapshot/save/snapshot_save_test.go | 2 +- command/snapshot/snapshot_command.go | 2 +- command/snapshot/snapshot_command_test.go | 2 +- command/tls/ca/create/tls_ca_create.go | 2 +- command/tls/ca/create/tls_ca_create_test.go | 2 +- command/tls/ca/tls_ca.go | 2 +- command/tls/ca/tls_ca_test.go | 2 +- command/tls/cert/create/tls_cert_create.go | 2 +- .../tls/cert/create/tls_cert_create_test.go | 2 +- command/tls/cert/tls_cert.go | 2 +- command/tls/cert/tls_cert_test.go | 2 +- command/tls/tls.go | 2 +- command/tls/tls_test.go | 2 +- .../troubleshoot/proxy/troubleshoot_proxy.go | 2 +- command/troubleshoot/troubleshoot.go | 2 +- command/troubleshoot/troubleshoot_test.go | 2 +- .../upstreams/troubleshoot_upstreams.go | 2 +- command/validate/validate.go | 2 +- command/validate/validate_test.go | 2 +- command/version/formatter.go | 2 +- command/version/formatter_test.go | 2 +- command/version/version.go | 2 +- command/version/version_test.go | 2 +- command/watch/watch.go | 2 +- command/watch/watch_test.go | 2 +- connect/certgen/certgen.go | 2 +- connect/example_test.go | 2 +- connect/proxy/config.go | 2 +- connect/proxy/config_test.go | 2 +- connect/proxy/conn.go | 2 +- connect/proxy/conn_test.go | 2 +- connect/proxy/listener.go | 2 +- connect/proxy/listener_test.go | 2 +- connect/proxy/proxy.go | 2 +- connect/proxy/proxy_test.go | 4 +- connect/proxy/testing.go | 2 +- connect/resolver.go | 2 +- connect/resolver_test.go | 2 +- connect/service.go | 2 +- connect/service_test.go | 4 +- connect/testing.go | 2 +- connect/tls.go | 4 +- connect/tls_test.go | 2 +- docs/README.md | 8 +- 
docs/persistence/README.md | 2 +- .../README.md | 31 +- .../architecture-overview.png | Bin .../guide.md | 203 +- .../raft-backend.png | Bin .../controller-architecture/controllers.md | 268 - .../controller-architecture/testing.md | 221 - docs/v2-architecture/service-mesh/README.md | 47 - .../service-mesh/controllers.png | Bin 479357 -> 0 bytes .../extensioncommon/basic_envoy_extender.go | 2 +- .../basic_extension_adapter.go | 2 +- .../extensioncommon/envoy_extender.go | 4 +- envoyextensions/extensioncommon/resources.go | 12 +- .../extensioncommon/resources_test.go | 2 +- .../extensioncommon/runtime_config.go | 2 +- .../extensioncommon/runtime_config_test.go | 2 +- .../upstream_envoy_extender.go | 2 +- envoyextensions/go.mod | 11 +- envoyextensions/go.sum | 6 +- envoyextensions/xdscommon/envoy_versioning.go | 2 +- .../xdscommon/envoy_versioning_test.go | 6 +- envoyextensions/xdscommon/proxysupport.go | 6 +- .../xdscommon/proxysupport_test.go | 2 +- envoyextensions/xdscommon/xdscommon.go | 6 +- fixup_acl_move.sh | 27 + go.mod | 79 +- go.sum | 224 +- grafana/README.md | 4 - .../consul-k8s-control-plane-monitoring.json | 3772 ------- .../pbacl/mock_ACLServiceClient.go | 188 - .../pbacl/mock_ACLServiceServer.go | 155 - .../pbacl/mock_UnsafeACLServiceServer.go | 64 - .../mock_ConnectCAServiceClient.go | 188 - .../mock_ConnectCAServiceServer.go | 143 - .../mock_ConnectCAService_WatchRootsClient.go | 384 - .../mock_ConnectCAService_WatchRootsServer.go | 349 - .../mock_UnsafeConnectCAServiceServer.go | 64 - .../mock_DataplaneServiceClient.go | 188 - .../mock_DataplaneServiceServer.go | 155 - .../mock_UnsafeDataplaneServiceServer.go | 64 - ...GetEnvoyBootstrapParamsRequest_NodeSpec.go | 64 - .../pbdns/mock_DNSServiceClient.go | 114 - .../pbdns/mock_DNSServiceServer.go | 96 - .../pbdns/mock_UnsafeDNSServiceServer.go | 64 - .../pbresource/mock_ResourceServiceClient.go | 632 -- .../pbresource/mock_ResourceServiceServer.go | 497 - .../mock_ResourceService_WatchListClient.go | 384 - .../mock_ResourceService_WatchListServer.go | 349 - .../mock_UnsafeResourceServiceServer.go | 64 - .../pbresource/mock_isWatchEvent_Event.go | 64 - .../mock_ServerDiscoveryServiceClient.go | 114 - .../mock_ServerDiscoveryServiceServer.go | 82 - ...rverDiscoveryService_WatchServersClient.go | 384 - ...rverDiscoveryService_WatchServersServer.go | 349 - ...mock_UnsafeServerDiscoveryServiceServer.go | 64 - internal/auth/exports.go | 41 - .../auth/internal/controllers/register.go | 18 - .../controllers/trafficpermissions/builder.go | 96 - .../trafficpermissions/controller.go | 361 - .../trafficpermissions/controller_test.go | 1219 --- .../expander/expander_ce.go | 14 - .../expander/expander_ce/expander_ce.go | 35 - .../trafficpermissions/expander/interface.go | 20 - .../trafficpermissions/helpers_ce.go | 32 - .../controllers/trafficpermissions/index.go | 43 - .../controllers/trafficpermissions/status.go | 68 - .../traffic_permissions_mapper.go | 73 - .../types/computed_traffic_permissions.go | 77 - .../computed_traffic_permissions_test.go | 147 - internal/auth/internal/types/errors.go | 17 - .../types/namespace_traffic_permissions.go | 68 - .../namespace_traffic_permissions_test.go | 145 - .../types/partition_traffic_permissions.go | 68 - .../partition_traffic_permissions_test.go | 145 - .../internal/types/traffic_permissions.go | 148 - .../types/traffic_permissions_test.go | 1012 -- internal/auth/internal/types/types.go | 28 - internal/auth/internal/types/validate.go | 203 - internal/auth/internal/types/validate_ce.go | 25 - 
.../auth/internal/types/workload_identity.go | 59 - .../internal/types/workload_identity_test.go | 104 - .../helpers/acl_hooks_test_helpers.go | 21 - .../{v2beta1 => v1alpha1}/api-service.json | 9 +- .../api-workload-1-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-1.json | 9 +- .../api-workload-10-health.json | 14 +- .../api-workload-10.json | 9 +- .../api-workload-11-health.json | 14 +- .../api-workload-11.json | 9 +- .../api-workload-12-health.json | 14 +- .../api-workload-12.json | 9 +- .../api-workload-13-health.json | 14 +- .../api-workload-13.json | 9 +- .../api-workload-14-health.json | 14 +- .../api-workload-14.json | 9 +- .../api-workload-15-health.json | 14 +- .../api-workload-15.json | 9 +- .../api-workload-16-health.json | 14 +- .../api-workload-16.json | 9 +- .../api-workload-17-health.json | 14 +- .../api-workload-17.json | 9 +- .../api-workload-18-health.json | 14 +- .../api-workload-18.json | 9 +- .../api-workload-19-health.json | 14 +- .../api-workload-19.json | 9 +- .../api-workload-2-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-2.json | 9 +- .../api-workload-20-health.json | 14 +- .../api-workload-20.json | 9 +- .../api-workload-3-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-3.json | 9 +- .../api-workload-4-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-4.json | 9 +- .../api-workload-5-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-5.json | 9 +- .../api-workload-6-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-6.json | 9 +- .../api-workload-7-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-7.json | 9 +- .../api-workload-8-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-8.json | 9 +- .../api-workload-9-health.json | 14 +- .../{v2beta1 => v1alpha1}/api-workload-9.json | 9 +- .../foo-service-endpoints.json | 16 +- .../{v2beta1 => v1alpha1}/foo-service.json | 11 +- .../grpc-api-service.json | 9 +- .../http-api-service.json | 9 +- .../v1alpha1/node-1-health.json | 33 + .../{v2beta1 => v1alpha1}/node-1.json | 10 +- .../v1alpha1/node-2-health.json | 33 + .../{v2beta1 => v1alpha1}/node-2.json | 10 +- .../v1alpha1/node-3-health.json | 33 + .../{v2beta1 => v1alpha1}/node-3.json | 10 +- .../v1alpha1/node-4-health.json | 33 + .../{v2beta1 => v1alpha1}/node-4.json | 10 +- .../v2beta1/node-1-health.json | 29 - .../v2beta1/node-2-health.json | 29 - .../v2beta1/node-3-health.json | 29 - .../v2beta1/node-4-health.json | 29 - internal/catalog/catalogtest/run_test.go | 44 +- ...2beta1.go => test_integration_v1alpha1.go} | 260 +- .../catalogtest/test_lifecycle_v2beta1.go | 730 -- internal/catalog/exports.go | 129 +- .../internal/controllers/endpoints/bound.go | 46 - .../controllers/endpoints/bound_test.go | 63 - .../controllers/endpoints/controller.go | 185 +- .../controllers/endpoints/controller_test.go | 769 +- .../endpoints/reconciliation_data.go | 180 + .../endpoints/reconciliation_data_test.go | 263 + .../internal/controllers/endpoints/status.go | 42 +- .../controllers/failover/controller.go | 420 - .../controllers/failover/controller_test.go | 479 - .../failover/expander/expander_ce.go | 12 - .../failover/expander/expander_ce/expander.go | 38 - .../expander/expander_ce/expander_test.go | 67 - .../failover/expander/interface.go | 17 - .../controllers/failover/helpers_ce.go | 14 - .../internal/controllers/failover/status.go | 109 - .../controllers/nodehealth/controller.go | 72 +- .../controllers/nodehealth/controller_test.go | 486 +- .../internal/controllers/nodehealth/status.go | 4 +- 
.../catalog/internal/controllers/register.go | 16 +- .../controllers/workloadhealth/controller.go | 140 +- .../workloadhealth/controller_test.go | 608 +- .../controllers/workloadhealth/status.go | 4 +- .../mappers/nodemapper/node_mapper.go | 107 + .../mappers/nodemapper/node_mapper_test.go | 149 + .../selectiontracker/selection_tracker.go | 124 +- .../selection_tracker_test.go | 278 + .../testhelpers/acl_hooks_test_helpers.go | 198 - .../types/computed_failover_policy.go | 78 - .../types/computed_failover_policy_test.go | 250 - internal/catalog/internal/types/dns_policy.go | 89 + .../catalog/internal/types/dns_policy_test.go | 162 + internal/catalog/internal/types/errors.go | 6 +- .../catalog/internal/types/errors_test.go | 2 +- .../catalog/internal/types/failover_policy.go | 359 - .../internal/types/failover_policy_test.go | 741 -- .../catalog/internal/types/health_checks.go | 37 +- .../internal/types/health_checks_test.go | 22 +- .../catalog/internal/types/health_status.go | 64 +- .../internal/types/health_status_test.go | 103 +- internal/catalog/internal/types/node.go | 50 +- .../internal/types/node_health_status.go | 90 - .../internal/types/node_health_status_test.go | 271 - internal/catalog/internal/types/node_test.go | 58 +- internal/catalog/internal/types/service.go | 69 +- .../internal/types/service_endpoints.go | 99 +- .../internal/types/service_endpoints_test.go | 210 +- .../catalog/internal/types/service_test.go | 86 +- .../types/testdata/errNotDNSLabel.golden | 2 +- internal/catalog/internal/types/types.go | 17 +- internal/catalog/internal/types/types_test.go | 26 +- internal/catalog/internal/types/validators.go | 244 +- .../catalog/internal/types/validators_test.go | 307 +- .../catalog/internal/types/virtual_ips.go | 44 +- .../internal/types/virtual_ips_test.go | 57 +- internal/catalog/internal/types/workload.go | 103 +- .../catalog/internal/types/workload_test.go | 227 +- internal/catalog/workloadselector/acls.go | 47 - .../catalog/workloadselector/acls_test.go | 123 - internal/catalog/workloadselector/gather.go | 114 - .../catalog/workloadselector/gather_test.go | 258 - internal/catalog/workloadselector/index.go | 72 - .../catalog/workloadselector/index_test.go | 131 - .../catalog/workloadselector/integ_test.go | 151 - internal/catalog/workloadselector/mapper.go | 45 - .../catalog/workloadselector/mapper_test.go | 180 - .../catalog/workloadselector/selecting.go | 16 - internal/controller/.mockery.yaml | 11 - internal/controller/api.go | 204 + internal/controller/api_test.go | 268 + internal/controller/cache/.mockery.yaml | 15 - internal/controller/cache/cache.go | 213 - internal/controller/cache/cache_test.go | 353 - .../controller/cache/cachemock/mock_Cache.go | 637 -- .../controller/cache/cachemock/mock_Query.go | 100 - .../cache/cachemock/mock_ReadOnlyCache.go | 432 - .../cache/cachemock/mock_ResourceIterator.go | 78 - .../cache/cachemock/mock_WriteCache.go | 119 - internal/controller/cache/client.go | 56 - internal/controller/cache/client_test.go | 265 - internal/controller/cache/clone.go | 93 - internal/controller/cache/clone_test.go | 214 - internal/controller/cache/decoded.go | 107 - internal/controller/cache/decoded_test.go | 360 - internal/controller/cache/errors.go | 73 - internal/controller/cache/errors_test.go | 59 - internal/controller/cache/index/.mockery.yaml | 23 - internal/controller/cache/index/builder.go | 33 - .../controller/cache/index/builder_test.go | 33 - .../controller/cache/index/convenience.go | 144 - .../cache/index/convenience_test.go | 383 - 
internal/controller/cache/index/errors.go | 16 - .../controller/cache/index/errors_test.go | 16 - internal/controller/cache/index/index.go | 96 - internal/controller/cache/index/index_test.go | 189 - .../cache/index/indexmock/mock_Indexer.go | 95 - .../index/indexmock/mock_MultiIndexer.go | 159 - .../index/indexmock/mock_ResourceIterator.go | 78 - .../index/indexmock/mock_SingleIndexer.go | 159 - .../index/indexmock/mock_resourceIterable.go | 97 - internal/controller/cache/index/interfaces.go | 60 - internal/controller/cache/index/iterator.go | 37 - .../controller/cache/index/iterator_test.go | 65 - .../testdata/MissingRequiredIndex.golden | 1 - internal/controller/cache/index/txn.go | 173 - internal/controller/cache/index/txn_test.go | 382 - .../controller/cache/indexers/.mockery.yaml | 15 - .../cache/indexers/decoded_indexer.go | 65 - .../cache/indexers/decoded_indexer_test.go | 295 - .../controller/cache/indexers/id_indexer.go | 48 - .../cache/indexers/id_indexer_test.go | 96 - .../indexersmock/mock_BoundReferences.go | 122 - .../indexers/indexersmock/mock_FromArgs.go | 95 - .../indexersmock/mock_GetSingleRefOrID.go | 80 - .../indexersmock/mock_MultiIndexer.go | 97 - .../indexersmock/mock_RefOrIDFetcher.go | 80 - .../indexersmock/mock_SingleIndexer.go | 97 - .../controller/cache/indexers/ref_indexer.go | 37 - .../cache/indexers/ref_indexer_test.go | 106 - internal/controller/cache/kind.go | 188 - internal/controller/cache/kind_test.go | 202 - .../cache/testdata/CacheTypeError.golden | 1 - .../cache/testdata/DuplicateIndexError.golden | 1 - .../cache/testdata/DuplicateQueryError.golden | 1 - .../cache/testdata/IndexError.golden | 1 - .../cache/testdata/IndexNotFound.golden | 1 - .../cache/testdata/QueryNotFound.golden | 1 - .../cache/testdata/QueryRequired.golden | 1 - internal/controller/controller.go | 477 +- internal/controller/controller_test.go | 740 -- .../controllermock/mock_CacheIDModifier.go | 95 - .../mock_CustomDependencyMapper.go | 93 - .../controllermock/mock_DependencyMapper.go | 95 - .../mock_DependencyTransform.go | 95 - .../controller/controllermock/mock_Lease.go | 116 - .../controllermock/mock_Reconciler.go | 81 - .../controller/controllermock/mock_task.go | 78 - internal/controller/controllertest/builder.go | 96 - internal/controller/custom_watch.go | 57 - internal/controller/dependencies.go | 112 - internal/controller/dependencies_test.go | 66 - internal/controller/dependency/.mockery.yaml | 11 - internal/controller/dependency/cache.go | 218 - internal/controller/dependency/cache_test.go | 346 - .../dependencymock/mock_CacheIDModifier.go | 96 - .../mock_DependencyTransform.go | 96 - .../controller/dependency/higher_order.go | 47 - .../dependency/higher_order_test.go | 144 - internal/controller/dependency/simple.go | 68 - internal/controller/dependency/transform.go | 74 - .../controller/dependency/transform_test.go | 166 - internal/controller/dependency_mappers.go | 61 + ...ple_test.go => dependency_mappers_test.go} | 66 +- internal/controller/doc.go | 2 +- internal/controller/helper.go | 71 - internal/controller/helper_test.go | 77 - internal/controller/manager.go | 18 +- internal/controller/mem_consistency_test.go | 374 - internal/controller/runner.go | 467 - .../controller/testdata/dependencies.golden | 5 - internal/controller/testing.go | 79 - internal/controller/watch.go | 42 - internal/dnsutil/dns.go | 91 - internal/dnsutil/dns_test.go | 122 - internal/go-sso/oidcauth/auth.go | 2 +- internal/go-sso/oidcauth/config.go | 3 +- internal/go-sso/oidcauth/config_test.go 
| 2 +- .../go-sso/oidcauth/internal/strutil/util.go | 2 +- .../oidcauth/internal/strutil/util_test.go | 2 +- internal/go-sso/oidcauth/jwt.go | 2 +- internal/go-sso/oidcauth/jwt_test.go | 2 +- internal/go-sso/oidcauth/oidc.go | 5 +- internal/go-sso/oidcauth/oidc_test.go | 2 +- .../go-sso/oidcauth/oidcauthtest/testing.go | 2 +- internal/go-sso/oidcauth/oidcjwt.go | 2 +- internal/go-sso/oidcauth/oidcjwt_test.go | 2 +- internal/go-sso/oidcauth/util.go | 2 +- internal/go-sso/oidcauth/util_test.go | 2 +- internal/hcp/exports.go | 34 - .../internal/controllers/link/controller.go | 246 - .../controllers/link/controller_test.go | 315 - .../hcp/internal/controllers/link/status.go | 142 - internal/hcp/internal/controllers/register.go | 30 - .../controllers/telemetrystate/controller.go | 203 - .../telemetrystate/controller_test.go | 174 - .../controllers/telemetrystate/status.go | 8 - internal/hcp/internal/types/link.go | 117 - internal/hcp/internal/types/link_test.go | 206 - .../hcp/internal/types/telemetry_state.go | 85 - internal/hcp/internal/types/testing.go | 23 - internal/hcp/internal/types/types.go | 11 - internal/mesh/exports.go | 49 +- .../controllers/apigateways/controller.go | 106 - .../apigateways/controller_test.go | 166 - .../apigateways/fetcher/data_fetcher.go | 44 - .../apigateways/fetcher/data_fetcher_test.go | 113 - .../explicitdestinations/controller.go | 319 - .../explicitdestinations/controller_test.go | 957 -- .../explicitdestinations/mapper/mapper.go | 74 - .../explicitdestinations/status.go | 121 - .../builder/api_gateway_builder.go | 154 - .../builder/mesh_gateway_builder.go | 426 - .../builder/mesh_gateway_builder_test.go | 343 - .../controllers/gatewayproxy/controller.go | 288 - .../gatewayproxy/controller_test.go | 318 - .../gatewayproxy/fetcher/data_fetcher.go | 165 - .../gatewayproxy/fetcher/data_fetcher_test.go | 266 - .../mapper/apigatewayworkloads.go | 55 - .../mapper/meshgatewayworkloads.go | 51 - .../implicitdestinations/auth_helper_test.go | 88 - .../implicitdestinations/controller.go | 314 - .../implicitdestinations/controller_test.go | 1573 --- .../controllers/implicitdestinations/index.go | 194 - .../implicitdestinations/index_test.go | 256 - .../implicitdestinations/mapper.go | 171 - .../implicitdestinations/status.go | 7 - .../meshconfiguration/controller.go | 33 - .../meshconfiguration/controller_test.go | 30 - .../controllers/meshgateways/controller.go | 80 - .../proxyconfiguration/controller.go | 188 - .../proxyconfiguration/controller_test.go | 358 - .../controllers/proxyconfiguration/sort.go | 108 - .../proxyconfiguration/sort_test.go | 160 - .../mesh/internal/controllers/register.go | 63 - .../internal/controllers/routes/controller.go | 208 - .../controllers/routes/controller_test.go | 1686 --- .../routes/destination_policy_validation.go | 60 - .../destination_policy_validation_test.go | 121 - .../internal/controllers/routes/generate.go | 861 -- .../controllers/routes/generate_test.go | 1977 ---- .../controllers/routes/intermediate.go | 72 - .../controllers/routes/loader/loader.go | 320 - .../controllers/routes/loader/loader_test.go | 442 - .../controllers/routes/loader/memoized.go | 93 - .../controllers/routes/loader/related.go | 233 - .../controllers/routes/pending_status.go | 92 - .../controllers/routes/ref_validation.go | 136 - .../controllers/routes/ref_validation_test.go | 275 - .../routes/routestest/routestest.go | 104 - .../internal/controllers/routes/sort_rules.go | 230 - .../controllers/routes/sort_rules_test.go | 492 - 
.../internal/controllers/routes/status.go | 238 - .../mesh/internal/controllers/routes/util.go | 20 - .../routes/xroutemapper/.mockery.yaml | 15 - .../controllers/routes/xroutemapper/util.go | 58 - .../routes/xroutemapper/xroutemapper.go | 298 - .../routes/xroutemapper/xroutemapper_test.go | 731 -- ...mock_ResolveFailoverServiceDestinations.go | 95 - .../sidecarproxy/builder/builder.go | 124 - .../sidecarproxy/builder/builder_test.go | 36 - .../builder/destination_multiport_test.go | 266 - .../sidecarproxy/builder/destinations.go | 706 -- .../sidecarproxy/builder/destinations_test.go | 576 - .../sidecarproxy/builder/expose_paths.go | 153 - .../sidecarproxy/builder/expose_paths_test.go | 111 - .../sidecarproxy/builder/local_app.go | 563 - .../builder/local_app_multiport_test.go | 173 - .../sidecarproxy/builder/local_app_test.go | 1122 -- .../sidecarproxy/builder/naming.go | 49 - .../sidecarproxy/builder/routes.go | 595 - ...cit-destinations-tproxy-default-bar.golden | 193 - ...destinations-tproxy-default-default.golden | 193 - ...xplicit-destinations-tproxy-foo-bar.golden | 193 - ...cit-destinations-tproxy-foo-default.golden | 193 - .../l4-multi-destination-default-bar.golden | 319 - ...4-multi-destination-default-default.golden | 319 - .../l4-multi-destination-foo-bar.golden | 319 - .../l4-multi-destination-foo-default.golden | 319 - ...cit-destinations-tproxy-default-bar.golden | 192 - ...destinations-tproxy-default-default.golden | 192 - ...mplicit-destinations-tproxy-foo-bar.golden | 192 - ...cit-destinations-tproxy-foo-default.golden | 192 - ...on-ip-port-bind-address-default-bar.golden | 164 - ...p-port-bind-address-default-default.golden | 164 - ...nation-ip-port-bind-address-foo-bar.golden | 164 - ...on-ip-port-bind-address-foo-default.golden | 164 - ...nix-socket-bind-address-default-bar.golden | 96 - ...socket-bind-address-default-default.golden | 96 - ...on-unix-socket-bind-address-foo-bar.golden | 96 - ...nix-socket-bind-address-foo-default.golden | 96 - ...icit-destination-tproxy-default-bar.golden | 127 - ...-destination-tproxy-default-default.golden | 127 - ...implicit-destination-tproxy-foo-bar.golden | 127 - ...icit-destination-tproxy-foo-default.golden | 127 - ...mixed-multi-destination-default-bar.golden | 414 - ...d-multi-destination-default-default.golden | 414 - .../mixed-multi-destination-foo-bar.golden | 414 - ...mixed-multi-destination-foo-default.golden | 414 - ...cit-destinations-tproxy-default-bar.golden | 494 - ...destinations-tproxy-default-default.golden | 494 - ...mplicit-destinations-tproxy-foo-bar.golden | 494 - ...cit-destinations-tproxy-foo-default.golden | 494 - ...icit-destination-tproxy-default-bar.golden | 275 - ...-destination-tproxy-default-default.golden | 275 - ...implicit-destination-tproxy-foo-bar.golden | 275 - ...icit-destination-tproxy-foo-default.golden | 275 - ...ltiple-workloads-tproxy-default-bar.golden | 275 - ...le-workloads-tproxy-default-default.golden | 275 - ...h-multiple-workloads-tproxy-foo-bar.golden | 275 - ...ltiple-workloads-tproxy-foo-default.golden | 275 - .../source/l7-expose-paths-default-bar.golden | 211 - .../l7-expose-paths-default-default.golden | 211 - .../source/l7-expose-paths-foo-bar.golden | 211 - .../source/l7-expose-paths-foo-default.golden | 211 - .../testdata/source/l7-expose-paths.golden | 211 - ...and-inbound-connections-default-bar.golden | 303 - ...inbound-connections-default-default.golden | 303 - ...cal-and-inbound-connections-foo-bar.golden | 303 - ...and-inbound-connections-foo-default.golden | 303 - 
.../local-and-inbound-connections.golden | 303 - ...ses-with-specific-ports-default-bar.golden | 338 - ...with-specific-ports-default-default.golden | 338 - ...dresses-with-specific-ports-foo-bar.golden | 338 - ...ses-with-specific-ports-foo-default.golden | 338 - ...kload-addresses-with-specific-ports.golden | 338 - ...addresses-without-ports-default-bar.golden | 290 - ...esses-without-ports-default-default.golden | 290 - ...oad-addresses-without-ports-foo-bar.golden | 290 - ...addresses-without-ports-foo-default.golden | 290 - ...le-workload-addresses-without-ports.golden | 290 - ...ses-with-specific-ports-default-bar.golden | 129 - ...with-specific-ports-default-default.golden | 129 - ...dresses-with-specific-ports-foo-bar.golden | 129 - ...ses-with-specific-ports-foo-default.golden | 129 - ...kload-addresses-with-specific-ports.golden | 129 - ...addresses-without-ports-default-bar.golden | 129 - ...esses-without-ports-default-default.golden | 129 - ...oad-addresses-without-ports-foo-bar.golden | 129 - ...addresses-without-ports-foo-default.golden | 129 - ...le-workload-addresses-without-ports.golden | 129 - ...d-address-without-ports-default-bar.golden | 129 - ...dress-without-ports-default-default.golden | 129 - ...kload-address-without-ports-foo-bar.golden | 129 - ...d-address-without-ports-foo-default.golden | 129 - ...ngle-workload-address-without-ports.golden | 129 - ...oad-with-only-mesh-port-default-bar.golden | 60 - ...with-only-mesh-port-default-default.golden | 60 - ...orkload-with-only-mesh-port-foo-bar.golden | 60 - ...oad-with-only-mesh-port-foo-default.golden | 60 - ...ort-l4-workload-with-only-mesh-port.golden | 60 - ...ses-with-specific-ports-default-bar.golden | 182 - ...with-specific-ports-default-default.golden | 182 - ...dresses-with-specific-ports-foo-bar.golden | 182 - ...ses-with-specific-ports-foo-default.golden | 182 - ...kload-addresses-with-specific-ports.golden | 182 - ...addresses-without-ports-default-bar.golden | 249 - ...esses-without-ports-default-default.golden | 249 - ...oad-addresses-without-ports-foo-bar.golden | 249 - ...addresses-without-ports-foo-default.golden | 249 - ...le-workload-addresses-without-ports.golden | 249 - ...d-address-without-ports-default-bar.golden | 249 - ...dress-without-ports-default-default.golden | 249 - ...kload-address-without-ports-foo-bar.golden | 249 - ...d-address-without-ports-foo-default.golden | 249 - ...ngle-workload-address-without-ports.golden | 249 - ...d-address-without-ports-default-bar.golden | 290 - ...dress-without-ports-default-default.golden | 290 - ...kload-address-without-ports-foo-bar.golden | 290 - ...d-address-without-ports-foo-default.golden | 290 - ...ngle-workload-address-without-ports.golden | 290 - .../controllers/sidecarproxy/controller.go | 350 - .../sidecarproxy/controller_test.go | 1004 -- .../controllers/sidecarproxy/data_fetcher.go | 333 - .../sidecarproxy/data_fetcher_test.go | 516 - .../controllers/sidecarproxy/helper_test.go | 87 - .../controllers/sidecarproxy/mapper.go | 210 - .../internal/controllers/xds/controller.go | 418 - .../controllers/xds/controller_test.go | 1333 --- .../controllers/xds/endpoint_builder.go | 78 - .../controllers/xds/endpoint_builder_test.go | 347 - .../internal/controllers/xds/leaf_cancels.go | 34 - .../internal/controllers/xds/leaf_mapper.go | 39 - .../internal/controllers/xds/mock_updater.go | 122 - .../controllers/xds/proxy_tracker_watch.go | 24 - .../controllers/xds/reconciliation_data.go | 61 - .../internal/controllers/xds/status/status.go | 131 - 
...cit-destinations-tproxy-default-bar.golden | 185 - ...destinations-tproxy-default-default.golden | 184 - ...xplicit-destinations-tproxy-foo-bar.golden | 185 - ...cit-destinations-tproxy-foo-default.golden | 185 - .../l4-multi-destination-default-bar.golden | 301 - ...4-multi-destination-default-default.golden | 300 - .../l4-multi-destination-foo-bar.golden | 301 - .../l4-multi-destination-foo-default.golden | 301 - ...cit-destinations-tproxy-default-bar.golden | 184 - ...destinations-tproxy-default-default.golden | 183 - ...mplicit-destinations-tproxy-foo-bar.golden | 184 - ...cit-destinations-tproxy-foo-default.golden | 184 - ...on-ip-port-bind-address-default-bar.golden | 156 - ...p-port-bind-address-default-default.golden | 155 - ...nation-ip-port-bind-address-foo-bar.golden | 156 - ...on-ip-port-bind-address-foo-default.golden | 156 - ...nix-socket-bind-address-default-bar.golden | 93 - ...socket-bind-address-default-default.golden | 92 - ...on-unix-socket-bind-address-foo-bar.golden | 93 - ...nix-socket-bind-address-foo-default.golden | 93 - ...icit-destination-tproxy-default-bar.golden | 124 - ...-destination-tproxy-default-default.golden | 123 - ...implicit-destination-tproxy-foo-bar.golden | 124 - ...icit-destination-tproxy-foo-default.golden | 124 - ...mixed-multi-destination-default-bar.golden | 380 - ...d-multi-destination-default-default.golden | 379 - .../mixed-multi-destination-foo-bar.golden | 380 - ...mixed-multi-destination-foo-default.golden | 380 - ...cit-destinations-tproxy-default-bar.golden | 466 - ...destinations-tproxy-default-default.golden | 465 - ...mplicit-destinations-tproxy-foo-bar.golden | 466 - ...cit-destinations-tproxy-foo-default.golden | 466 - ...icit-destination-tproxy-default-bar.golden | 262 - ...-destination-tproxy-default-default.golden | 261 - ...implicit-destination-tproxy-foo-bar.golden | 262 - ...icit-destination-tproxy-foo-default.golden | 262 - ...ltiple-workloads-tproxy-default-bar.golden | 262 - ...le-workloads-tproxy-default-default.golden | 261 - ...h-multiple-workloads-tproxy-foo-bar.golden | 262 - ...ltiple-workloads-tproxy-foo-default.golden | 262 - .../source/l7-expose-paths-default-bar.golden | 213 - .../l7-expose-paths-default-default.golden | 212 - .../source/l7-expose-paths-foo-bar.golden | 213 - .../source/l7-expose-paths-foo-default.golden | 213 - .../testdata/source/l7-expose-paths.golden | 212 - ...and-inbound-connections-default-bar.golden | 305 - ...inbound-connections-default-default.golden | 304 - ...cal-and-inbound-connections-foo-bar.golden | 305 - ...and-inbound-connections-foo-default.golden | 305 - .../local-and-inbound-connections.golden | 304 - ...ses-with-specific-ports-default-bar.golden | 340 - ...with-specific-ports-default-default.golden | 339 - ...dresses-with-specific-ports-foo-bar.golden | 340 - ...ses-with-specific-ports-foo-default.golden | 340 - ...kload-addresses-with-specific-ports.golden | 339 - ...addresses-without-ports-default-bar.golden | 292 - ...esses-without-ports-default-default.golden | 291 - ...oad-addresses-without-ports-foo-bar.golden | 292 - ...addresses-without-ports-foo-default.golden | 292 - ...le-workload-addresses-without-ports.golden | 291 - ...ses-with-specific-ports-default-bar.golden | 131 - ...with-specific-ports-default-default.golden | 130 - ...dresses-with-specific-ports-foo-bar.golden | 131 - ...ses-with-specific-ports-foo-default.golden | 131 - ...kload-addresses-with-specific-ports.golden | 130 - ...addresses-without-ports-default-bar.golden | 131 - 
...esses-without-ports-default-default.golden | 130 - ...oad-addresses-without-ports-foo-bar.golden | 131 - ...addresses-without-ports-foo-default.golden | 131 - ...le-workload-addresses-without-ports.golden | 130 - ...ngle-workload-address-without-ports.golden | 128 - ...oad-with-only-mesh-port-default-bar.golden | 62 - ...with-only-mesh-port-default-default.golden | 61 - ...orkload-with-only-mesh-port-foo-bar.golden | 62 - ...oad-with-only-mesh-port-foo-default.golden | 62 - ...ort-l4-workload-with-only-mesh-port.golden | 61 - ...ses-with-specific-ports-default-bar.golden | 184 - ...with-specific-ports-default-default.golden | 183 - ...dresses-with-specific-ports-foo-bar.golden | 184 - ...ses-with-specific-ports-foo-default.golden | 184 - ...kload-addresses-with-specific-ports.golden | 183 - ...addresses-without-ports-default-bar.golden | 251 - ...esses-without-ports-default-default.golden | 250 - ...oad-addresses-without-ports-foo-bar.golden | 251 - ...addresses-without-ports-foo-default.golden | 251 - ...le-workload-addresses-without-ports.golden | 250 - ...ngle-workload-address-without-ports.golden | 247 - ...d-address-without-ports-default-bar.golden | 292 - ...dress-without-ports-default-default.golden | 291 - ...kload-address-without-ports-foo-bar.golden | 292 - ...d-address-without-ports-foo-default.golden | 292 - ...ngle-workload-address-without-ports.golden | 291 - .../mappers/common/workload_selector_util.go | 58 - .../common/workload_selector_util_test.go | 68 - .../workload_selection_mapper.go | 80 - .../workload_selection_mapper_test.go | 145 - .../internal/meshindexes/computed_routes.go | 66 - .../meshindexes/computed_routes_test.go | 169 - internal/mesh/internal/types/api_gateway.go | 20 - .../types/computed_explicit_destinations.go | 19 - .../types/computed_implicit_destinations.go | 102 - .../computed_implicit_destinations_test.go | 268 - .../types/computed_proxy_configuration.go | 17 - .../mesh/internal/types/computed_routes.go | 152 - .../internal/types/computed_routes_test.go | 199 - internal/mesh/internal/types/decoded.go | 36 - .../mesh/internal/types/destination_policy.go | 275 - .../internal/types/destination_policy_test.go | 609 -- internal/mesh/internal/types/destinations.go | 169 - .../types/destinations_configuration.go | 40 - .../types/destinations_configuration_test.go | 90 - .../mesh/internal/types/destinations_test.go | 414 - internal/mesh/internal/types/errors.go | 16 - internal/mesh/internal/types/grpc_route.go | 237 - .../mesh/internal/types/grpc_route_test.go | 653 -- internal/mesh/internal/types/http_route.go | 350 - .../mesh/internal/types/http_route_test.go | 911 -- .../mesh/internal/types/intermediate/types.go | 24 - .../mesh/internal/types/mesh_configuration.go | 22 - internal/mesh/internal/types/mesh_gateway.go | 44 - .../mesh/internal/types/mesh_gateway_test.go | 97 - .../internal/types/proxy_configuration.go | 213 +- .../types/proxy_configuration_test.go | 360 - .../internal/types/proxy_state_template.go | 202 - .../types/proxy_state_template_test.go | 191 - internal/mesh/internal/types/tcp_route.go | 104 - .../mesh/internal/types/tcp_route_test.go | 237 - internal/mesh/internal/types/types.go | 24 +- internal/mesh/internal/types/types_test.go | 22 +- internal/mesh/internal/types/upstreams.go | 32 + internal/mesh/internal/types/util.go | 100 - internal/mesh/internal/types/xroute.go | 335 - internal/mesh/internal/types/xroute_test.go | 559 - .../mesh/proxy-snapshot/proxy_snapshot.go | 20 - .../mesh/proxy-tracker/mock_SessionLimiter.go | 53 - 
.../mesh/proxy-tracker/proxy_state_exports.go | 50 - .../proxy-tracker/proxy_state_exports_test.go | 77 - internal/mesh/proxy-tracker/proxy_tracker.go | 261 - .../mesh/proxy-tracker/proxy_tracker_test.go | 340 - internal/multicluster/exports.go | 51 - .../controllers/exportedservices/builder.go | 166 - .../exportedservices/controller.go | 390 - .../exportedservices/controller_test.go | 976 -- .../exportedservices/expander/expander_ce.go | 12 - .../expander/expander_ce/expander.go | 47 - .../expander/expander_ce/expander_test.go | 57 - .../exportedservices/expander/types/types.go | 10 - .../exportedservices/helpers_ce.go | 14 - .../controllers/exportedservices/status.go | 42 - .../internal/controllers/register.go | 26 - .../controllers/v1compat/controller.go | 414 - .../controllers/v1compat/controller_test.go | 428 - .../v1compat/mock_AggregatedConfig.go | 262 - .../types/computed_exported_services.go | 37 - .../types/computed_exported_services_test.go | 180 - .../multicluster/internal/types/decoded.go | 18 - .../internal/types/exported_services.go | 59 - .../internal/types/exported_services_test.go | 220 - .../multicluster/internal/types/helpers.go | 136 - .../multicluster/internal/types/helpers_ce.go | 68 - .../types/namespace_exported_services.go | 33 - .../types/namespace_exported_services_test.go | 187 - .../types/partition_exported_services.go | 33 - .../types/partition_exported_services_test.go | 187 - internal/multicluster/internal/types/types.go | 23 - .../multicluster/internal/types/types_ce.go | 12 - internal/protohcl/any.go | 117 - internal/protohcl/attributes.go | 157 - internal/protohcl/blocks.go | 115 - internal/protohcl/cty.go | 149 - internal/protohcl/decoder.go | 287 - internal/protohcl/doc.go | 79 - internal/protohcl/naming.go | 21 - internal/protohcl/oneof.go | 54 - internal/protohcl/primitives.go | 147 - internal/protohcl/testproto/buf.gen.yaml | 12 - internal/protohcl/testproto/example.pb.go | 997 -- internal/protohcl/testproto/example.proto | 77 - internal/protohcl/unmarshal.go | 137 - internal/protohcl/unmarshal_test.go | 610 -- internal/protohcl/well_known_types.go | 421 - internal/protoutil/protoutil.go | 23 - internal/resource/acls.go | 13 - internal/resource/authz.go | 20 - internal/resource/authz_ce.go | 17 - internal/resource/authz_ce_test.go | 39 - internal/resource/bound_refs.go | 65 - internal/resource/decode.go | 105 - internal/resource/decode_test.go | 161 - internal/resource/demo/controller.go | 11 +- internal/resource/demo/controller_test.go | 6 +- internal/resource/demo/demo.go | 153 +- internal/resource/equality.go | 25 +- internal/resource/equality_test.go | 325 +- internal/resource/errors.go | 71 +- internal/resource/errors_test.go | 7 +- internal/resource/filter.go | 109 - internal/resource/filter_test.go | 195 - internal/resource/hooks.go | 107 - internal/resource/hooks_test.go | 243 - internal/resource/http/http.go | 314 - internal/resource/http/http_test.go | 620 -- .../resource/mappers/bimapper/bimapper.go | 323 - .../mappers/bimapper/bimapper_test.go | 489 - .../selection_tracker_test.go | 375 - .../internal/generate/generate.go | 64 - internal/resource/protoc-gen-deepcopy/main.go | 30 - .../internal/generate/generate.go | 112 - .../resource/protoc-gen-json-shim/main.go | 30 - .../internal/generate/generate.go | 158 - .../protoc-gen-resource-types/main.go | 27 - internal/resource/reaper/controller.go | 11 +- internal/resource/reaper/controller_test.go | 340 +- internal/resource/reference.go | 47 +- internal/resource/refkey.go | 91 - 
internal/resource/refkey_test.go | 85 - internal/resource/registry.go | 101 +- internal/resource/registry_ce.go | 38 - internal/resource/registry_test.go | 61 +- internal/resource/resource.go | 97 - internal/resource/resource_test.go | 53 - internal/resource/resourcetest/acls.go | 119 - internal/resource/resourcetest/builder.go | 105 +- internal/resource/resourcetest/client.go | 245 +- internal/resource/resourcetest/decode.go | 24 - internal/resource/resourcetest/require.go | 26 - internal/resource/resourcetest/tenancy.go | 90 - internal/resource/resourcetest/testing.go | 14 +- internal/resource/resourcetest/validation.go | 31 - internal/resource/sort.go | 67 - internal/resource/sort_test.go | 145 - internal/resource/stringer.go | 60 - internal/resource/tenancy.go | 171 - internal/resource/tenancy_test.go | 237 - internal/resourcehcl/any.go | 52 - internal/resourcehcl/naming.go | 41 - .../resourcehcl/testdata/destinations.golden | 1 - .../resourcehcl/testdata/destinations.hcl | 25 - .../fuzz/FuzzUnmarshall/0e4b8ec300611dbc | 2 - .../fuzz/FuzzUnmarshall/c800420b7494c6d1 | 2 - .../fuzz/FuzzUnmarshall/eaba8205942c3f31 | 2 - .../testdata/gvk-no-arguments.error | 1 - .../resourcehcl/testdata/gvk-no-arguments.hcl | 4 - .../resourcehcl/testdata/invalid-group.error | 1 - .../resourcehcl/testdata/invalid-group.hcl | 8 - .../resourcehcl/testdata/invalid-gvk.error | 1 - internal/resourcehcl/testdata/invalid-gvk.hcl | 4 - .../testdata/invalid-metadata.error | 1 - .../resourcehcl/testdata/invalid-metadata.hcl | 8 - .../resourcehcl/testdata/invalid-name.error | 1 - .../resourcehcl/testdata/invalid-name.hcl | 4 - .../testdata/no-blocks-any-first.golden | 1 - .../testdata/no-blocks-any-first.hcl | 8 - .../resourcehcl/testdata/no-blocks.golden | 1 - internal/resourcehcl/testdata/no-blocks.hcl | 33 - internal/resourcehcl/testdata/owner.golden | 1 - internal/resourcehcl/testdata/owner.hcl | 9 - .../resourcehcl/testdata/simple-gvk.golden | 1 - internal/resourcehcl/testdata/simple-gvk.hcl | 13 - .../resourcehcl/testdata/type-block.golden | 1 - internal/resourcehcl/testdata/type-block.hcl | 8 - .../testdata/unknown-field-block.error | 1 - .../testdata/unknown-field-block.hcl | 3 - .../testdata/unknown-field-object.error | 1 - .../testdata/unknown-field-object.hcl | 3 - .../resourcehcl/testdata/unknown-type.error | 1 - .../resourcehcl/testdata/unknown-type.hcl | 8 - internal/resourcehcl/unmarshal.go | 55 - internal/resourcehcl/unmarshal_test.go | 147 - internal/storage/conformance/conformance.go | 98 +- internal/storage/inmem/backend.go | 2 +- internal/storage/inmem/backend_test.go | 5 +- internal/storage/inmem/event_index.go | 2 +- internal/storage/inmem/schema.go | 22 +- internal/storage/inmem/snapshot.go | 2 +- internal/storage/inmem/snapshot_test.go | 12 +- internal/storage/inmem/store.go | 20 +- internal/storage/inmem/watch.go | 64 +- internal/storage/raft/backend.go | 2 +- internal/storage/raft/conformance_test.go | 8 +- internal/storage/raft/forwarding.go | 2 +- internal/storage/storage.go | 6 +- internal/tenancy/exports.go | 34 - .../tenancy/internal/bridge/tenancy_bridge.go | 76 - .../internal/bridge/tenancy_bridge_ce.go | 21 - .../internal/controllers/common/common.go | 196 - .../controllers/namespace/controller.go | 94 - .../tenancy/internal/controllers/register.go | 12 - .../internal/controllers/register_ce.go | 15 - internal/tenancy/internal/types/errors.go | 11 - internal/tenancy/internal/types/namespace.go | 69 - internal/tenancy/internal/types/types.go | 10 - 
internal/tenancy/internal/types/types_ce.go | 12 - internal/tenancy/internal/types/types_test.go | 121 - .../tenancytest/namespace_controller_test.go | 156 - .../tenancy/tenancytest/namespace_test.go | 116 - internal/testing/errors/errors.go | 41 - internal/testing/golden/golden.go | 24 +- .../e2e/consul/agent/structs/structs.go | 2 +- .../e2e/consul/proto/pbcommon/common.go | 2 +- .../tools/proto-gen-rpc-glue/e2e/source.pb.go | 2 +- internal/tools/proto-gen-rpc-glue/main.go | 2 +- .../tools/proto-gen-rpc-glue/main_test.go | 2 +- .../protoc-gen-consul-rate-limit/main.go | 6 +- .../postprocess/main.go | 5 +- .../protoc-gen-grpc-clone/e2e/.mockery.yaml | 15 - .../protoc-gen-grpc-clone/e2e/e2e_test.go | 72 - .../e2e/mock_SimpleClient_test.go | 180 - .../e2e/mock_Simple_FlowClient_test.go | 356 - .../e2e/proto/buf.gen.yaml | 22 - .../e2e/proto/cloning_stream.pb.go | 31 - .../e2e/proto/service.pb.go | 258 - .../e2e/proto/service.proto | 22 - .../e2e/proto/service_cloning_grpc.pb.go | 69 - .../e2e/proto/service_grpc.pb.go | 167 - .../internal/generate/generate.go | 146 - .../generate/templates/cloning-stream.tmpl | 31 - .../internal/generate/templates/file.tmpl | 14 - .../templates/server-stream-method.tmpl | 10 - .../internal/generate/templates/service.tmpl | 44 - .../generate/templates/unary-method.tmpl | 10 - internal/tools/protoc-gen-grpc-clone/main.go | 27 - ipaddr/detect.go | 2 +- ipaddr/detect_test.go | 2 +- ipaddr/ipaddr.go | 2 +- ipaddr/ipaddr_test.go | 2 +- lib/cluster.go | 2 +- lib/cluster_test.go | 2 +- lib/decode/decode.go | 2 +- lib/decode/decode_test.go | 2 +- lib/eof.go | 2 +- lib/eof_test.go | 2 +- lib/file/atomic.go | 2 +- lib/file/atomic_test.go | 2 +- lib/hoststats/collector.go | 16 +- lib/hoststats/collector_test.go | 48 - lib/json.go | 2 +- lib/map_walker.go | 2 +- lib/map_walker_test.go | 2 +- lib/maps/maps.go | 2 +- lib/maps/maps_test.go | 2 +- lib/math.go | 2 +- lib/math_test.go | 2 +- lib/mutex/mutex.go | 2 +- lib/mutex/mutex_test.go | 2 +- lib/path.go | 2 +- lib/retry/retry.go | 2 +- lib/retry/retry_test.go | 2 +- lib/routine/routine.go | 2 +- lib/routine/routine_test.go | 2 +- lib/rtt.go | 2 +- lib/rtt_test.go | 2 +- lib/semaphore/semaphore.go | 2 +- lib/semaphore/semaphore_test.go | 2 +- lib/serf/serf.go | 2 +- lib/stop_context.go | 2 +- lib/stop_context_test.go | 2 +- lib/strings.go | 2 +- lib/stringslice/stringslice.go | 2 +- lib/stringslice/stringslice_test.go | 2 +- lib/telemetry.go | 12 +- lib/telemetry_test.go | 2 +- lib/template/hil.go | 2 +- lib/template/hil_test.go | 2 +- lib/testhelpers/testhelpers.go | 14 - lib/translate.go | 2 +- lib/translate_test.go | 2 +- lib/ttlcache/eviction.go | 2 +- lib/ttlcache/eviction_test.go | 2 +- lib/useragent.go | 2 +- lib/useragent_test.go | 2 +- lib/uuid.go | 2 +- logging/gated_writer.go | 2 +- logging/gated_writer_test.go | 2 +- logging/grpc.go | 2 +- logging/grpc_test.go | 2 +- logging/log_levels.go | 2 +- logging/logfile.go | 2 +- logging/logfile_bsd.go | 3 +- logging/logfile_linux.go | 1 + logging/logfile_solaris.go | 1 + logging/logfile_test.go | 2 +- logging/logger.go | 2 +- logging/logger_test.go | 2 +- logging/monitor/monitor.go | 2 +- logging/monitor/monitor_test.go | 2 +- logging/names.go | 9 +- logging/syslog.go | 2 +- logging/syslog_test.go | 3 +- logging/syslog_unsupported_test.go | 3 +- main.go | 2 +- proto-public/LICENSE | 365 - .../annotations/ratelimit/ratelimit.pb.go | 59 +- .../annotations/ratelimit/ratelimit.proto | 1 - .../ratelimit/ratelimit_deepcopy.gen.go | 27 - .../ratelimit/ratelimit_json.gen.go | 
22 - proto-public/buf.gen.yaml | 20 - proto-public/buf.lock | 8 - proto-public/buf.yaml | 3 - proto-public/go.mod | 8 +- proto-public/go.sum | 27 +- proto-public/pbacl/acl_cloning_grpc.pb.go | 69 - proto-public/pbacl/acl_deepcopy.gen.go | 111 - proto-public/pbacl/acl_json.gen.go | 66 - .../computed_traffic_permissions.pb.binary.go | 18 - .../computed_traffic_permissions.pb.go | 223 - .../computed_traffic_permissions.proto | 22 - ...mputed_traffic_permissions_deepcopy.gen.go | 27 - .../computed_traffic_permissions_json.gen.go | 22 - .../pbauth/v2beta1/resources.rtypes.go | 50 - .../v2beta1/traffic_permission_extras_test.go | 63 - .../v2beta1/traffic_permissions.pb.binary.go | 108 - .../pbauth/v2beta1/traffic_permissions.pb.go | 1194 -- .../pbauth/v2beta1/traffic_permissions.proto | 141 - .../v2beta1/traffic_permissions_addon.go | 25 - .../traffic_permissions_deepcopy.gen.go | 216 - .../v2beta1/traffic_permissions_extras.go | 60 - .../v2beta1/traffic_permissions_json.gen.go | 121 - .../v2beta1/workload_identity.pb.binary.go | 18 - .../pbauth/v2beta1/workload_identity.pb.go | 158 - .../pbauth/v2beta1/workload_identity.proto | 12 - .../v2beta1/workload_identity_deepcopy.gen.go | 27 - .../v2beta1/workload_identity_json.gen.go | 22 - .../v1alpha1/dns.pb.binary.go} | 12 +- proto-public/pbcatalog/v1alpha1/dns.pb.go | 259 + proto-public/pbcatalog/v1alpha1/dns.proto | 18 + .../{v2beta1 => v1alpha1}/health.pb.binary.go | 14 +- proto-public/pbcatalog/v1alpha1/health.pb.go | 1050 ++ .../{v2beta1 => v1alpha1}/health.proto | 30 +- .../{v2beta1 => v1alpha1}/node.pb.binary.go | 4 +- proto-public/pbcatalog/v1alpha1/node.pb.go | 244 + .../{v2beta1 => v1alpha1}/node.proto | 6 +- .../pbcatalog/v1alpha1/protocol.pb.go | 166 + .../pbcatalog/v1alpha1/protocol.proto | 17 + .../selector.pb.binary.go | 4 +- .../pbcatalog/v1alpha1/selector.pb.go | 177 + .../{v2beta1 => v1alpha1}/selector.proto | 3 +- .../service.pb.binary.go | 4 +- proto-public/pbcatalog/v1alpha1/service.pb.go | 301 + .../{v2beta1 => v1alpha1}/service.proto | 16 +- .../service_endpoints.pb.binary.go | 4 +- .../v1alpha1/service_endpoints.pb.go | 308 + .../service_endpoints.proto | 15 +- .../{v2beta1 => v1alpha1}/vip.pb.binary.go | 4 +- proto-public/pbcatalog/v1alpha1/vip.pb.go | 244 + .../pbcatalog/{v2beta1 => v1alpha1}/vip.proto | 6 +- .../workload.pb.binary.go | 24 +- .../pbcatalog/v1alpha1/workload.pb.go | 520 + .../{v2beta1 => v1alpha1}/workload.proto | 19 +- .../computed_failover_policy.pb.binary.go | 18 - .../v2beta1/computed_failover_policy.pb.go | 227 - .../v2beta1/computed_failover_policy.proto | 26 - .../computed_failover_policy_deepcopy.gen.go | 27 - .../computed_failover_policy_extras.go | 48 - .../computed_failover_policy_extras_test.go | 70 - .../computed_failover_policy_json.gen.go | 22 - .../v2beta1/failover_policy.pb.binary.go | 38 - .../pbcatalog/v2beta1/failover_policy.pb.go | 467 - .../pbcatalog/v2beta1/failover_policy.proto | 60 - .../v2beta1/failover_policy_deepcopy.gen.go | 69 - .../v2beta1/failover_policy_extras.go | 15 - .../v2beta1/failover_policy_extras_test.go | 110 - .../v2beta1/failover_policy_json.gen.go | 44 - proto-public/pbcatalog/v2beta1/health.pb.go | 1158 -- .../pbcatalog/v2beta1/health_deepcopy.gen.go | 216 - .../pbcatalog/v2beta1/health_json.gen.go | 121 - proto-public/pbcatalog/v2beta1/node.pb.go | 247 - .../pbcatalog/v2beta1/node_deepcopy.gen.go | 48 - .../pbcatalog/v2beta1/node_json.gen.go | 33 - proto-public/pbcatalog/v2beta1/protocol.pb.go | 171 - proto-public/pbcatalog/v2beta1/protocol.proto | 19 - 
.../pbcatalog/v2beta1/resources.rtypes.go | 85 - proto-public/pbcatalog/v2beta1/selector.pb.go | 185 - .../v2beta1/selector_deepcopy.gen.go | 27 - .../pbcatalog/v2beta1/selector_json.gen.go | 22 - proto-public/pbcatalog/v2beta1/service.pb.go | 310 - .../pbcatalog/v2beta1/service_addon.go | 120 - .../pbcatalog/v2beta1/service_addon_test.go | 208 - .../pbcatalog/v2beta1/service_deepcopy.gen.go | 48 - .../pbcatalog/v2beta1/service_endpoints.pb.go | 336 - .../v2beta1/service_endpoints_addon.go | 29 - .../v2beta1/service_endpoints_addon_test.go | 52 - .../v2beta1/service_endpoints_deepcopy.gen.go | 48 - .../v2beta1/service_endpoints_json.gen.go | 33 - .../pbcatalog/v2beta1/service_json.gen.go | 33 - proto-public/pbcatalog/v2beta1/vip.pb.go | 247 - .../pbcatalog/v2beta1/vip_deepcopy.gen.go | 48 - .../pbcatalog/v2beta1/vip_json.gen.go | 33 - proto-public/pbcatalog/v2beta1/workload.pb.go | 675 -- .../pbcatalog/v2beta1/workload_addon.go | 80 - .../pbcatalog/v2beta1/workload_addon_test.go | 262 - .../v2beta1/workload_deepcopy.gen.go | 132 - .../pbcatalog/v2beta1/workload_json.gen.go | 77 - .../pbconnectca/ca_cloning_grpc.pb.go | 69 - proto-public/pbconnectca/ca_deepcopy.gen.go | 111 - proto-public/pbconnectca/ca_json.gen.go | 66 - proto-public/pbconnectca/cloning_stream.pb.go | 31 - proto-public/pbdataplane/dataplane.pb.go | 310 +- proto-public/pbdataplane/dataplane.proto | 25 +- .../pbdataplane/dataplane_cloning_grpc.pb.go | 69 - .../pbdataplane/dataplane_deepcopy.gen.go | 111 - .../pbdataplane/dataplane_json.gen.go | 66 - proto-public/pbdns/dns_cloning_grpc.pb.go | 58 - proto-public/pbdns/dns_deepcopy.gen.go | 48 - proto-public/pbdns/dns_json.gen.go | 33 - proto-public/pbdns/mock_DNSServiceClient.go | 64 + proto-public/pbdns/mock_DNSServiceServer.go | 55 + .../pbdns/mock_UnsafeDNSServiceServer.go | 30 + proto-public/pbhcp/v2/hcp_config.pb.binary.go | 18 - proto-public/pbhcp/v2/hcp_config.pb.go | 199 - proto-public/pbhcp/v2/hcp_config.proto | 23 - .../pbhcp/v2/hcp_config_deepcopy.gen.go | 27 - proto-public/pbhcp/v2/hcp_config_json.gen.go | 22 - proto-public/pbhcp/v2/link.pb.binary.go | 18 - proto-public/pbhcp/v2/link.pb.go | 283 - proto-public/pbhcp/v2/link.proto | 26 - proto-public/pbhcp/v2/link_deepcopy.gen.go | 27 - proto-public/pbhcp/v2/link_json.gen.go | 22 - proto-public/pbhcp/v2/resources.rtypes.go | 29 - .../pbhcp/v2/telemetry_state.pb.binary.go | 38 - proto-public/pbhcp/v2/telemetry_state.pb.go | 426 - proto-public/pbhcp/v2/telemetry_state.proto | 55 - .../pbhcp/v2/telemetry_state_deepcopy.gen.go | 69 - .../pbhcp/v2/telemetry_state_json.gen.go | 44 - .../connection.pb.binary.go | 4 +- proto-public/pbmesh/v1alpha1/connection.pb.go | 316 + proto-public/pbmesh/v1alpha1/connection.proto | 22 + .../{v2beta1 => v1alpha1}/expose.pb.binary.go | 4 +- proto-public/pbmesh/v1alpha1/expose.pb.go | 269 + proto-public/pbmesh/v1alpha1/expose.proto | 19 + .../proxy.pb.binary.go} | 24 +- proto-public/pbmesh/v1alpha1/proxy.pb.go | 816 ++ proto-public/pbmesh/v1alpha1/proxy.proto | 104 + proto-public/pbmesh/v1alpha1/routing.pb.go | 181 + .../{v2beta1 => v1alpha1}/routing.proto | 6 +- .../upstreams.pb.binary.go} | 36 +- proto-public/pbmesh/v1alpha1/upstreams.pb.go | 997 ++ proto-public/pbmesh/v1alpha1/upstreams.proto | 100 + .../pbmesh/v2beta1/api_gateway.pb.binary.go | 38 - proto-public/pbmesh/v2beta1/api_gateway.pb.go | 398 - proto-public/pbmesh/v2beta1/api_gateway.proto | 54 - .../v2beta1/api_gateway_deepcopy.gen.go | 69 - .../pbmesh/v2beta1/api_gateway_json.gen.go | 44 - 
.../pbmesh/v2beta1/common.pb.binary.go | 28 - proto-public/pbmesh/v2beta1/common.pb.go | 280 - proto-public/pbmesh/v2beta1/common.proto | 37 - .../pbmesh/v2beta1/common_deepcopy.gen.go | 48 - .../pbmesh/v2beta1/common_json.gen.go | 33 - ...omputed_explicit_destinations.pb.binary.go | 18 - .../computed_explicit_destinations.pb.go | 180 - .../computed_explicit_destinations.proto | 16 - ...uted_explicit_destinations_deepcopy.gen.go | 27 - ...computed_explicit_destinations_json.gen.go | 22 - .../computed_gateway_routes.pb.binary.go | 18 - .../v2beta1/computed_gateway_routes.pb.go | 215 - .../v2beta1/computed_gateway_routes.proto | 27 - .../computed_gateway_routes_deepcopy.gen.go | 27 - .../computed_gateway_routes_json.gen.go | 22 - ...omputed_implicit_destinations.pb.binary.go | 28 - .../computed_implicit_destinations.pb.go | 273 - .../computed_implicit_destinations.proto | 24 - ...uted_implicit_destinations_deepcopy.gen.go | 48 - ...computed_implicit_destinations_json.gen.go | 33 - .../computed_proxy_configuration.pb.binary.go | 18 - .../computed_proxy_configuration.pb.go | 199 - .../computed_proxy_configuration.proto | 21 - ...mputed_proxy_configuration_deepcopy.gen.go | 27 - .../computed_proxy_configuration_json.gen.go | 22 - .../v2beta1/computed_routes.pb.binary.go | 148 - .../pbmesh/v2beta1/computed_routes.pb.go | 1625 --- .../pbmesh/v2beta1/computed_routes.proto | 175 - .../v2beta1/computed_routes_deepcopy.gen.go | 300 - .../v2beta1/computed_routes_json.gen.go | 165 - proto-public/pbmesh/v2beta1/connection.pb.go | 328 - proto-public/pbmesh/v2beta1/connection.proto | 30 - .../pbmesh/v2beta1/connection_deepcopy.gen.go | 48 - .../pbmesh/v2beta1/connection_json.gen.go | 33 - .../v2beta1/destination_policy.pb.binary.go | 88 - .../pbmesh/v2beta1/destination_policy.pb.go | 1106 -- .../pbmesh/v2beta1/destination_policy.proto | 163 - .../destination_policy_deepcopy.gen.go | 174 - .../v2beta1/destination_policy_json.gen.go | 99 - .../pbmesh/v2beta1/destinations.pb.binary.go | 58 - .../pbmesh/v2beta1/destinations.pb.go | 671 -- .../pbmesh/v2beta1/destinations.proto | 80 - .../destinations_configuration.pb.binary.go | 58 - .../v2beta1/destinations_configuration.pb.go | 704 -- .../v2beta1/destinations_configuration.proto | 112 - ...destinations_configuration_deepcopy.gen.go | 111 - .../destinations_configuration_json.gen.go | 66 - .../v2beta1/destinations_deepcopy.gen.go | 111 - .../pbmesh/v2beta1/destinations_json.gen.go | 66 - proto-public/pbmesh/v2beta1/expose.pb.go | 322 - proto-public/pbmesh/v2beta1/expose.proto | 25 - .../pbmesh/v2beta1/expose_deepcopy.gen.go | 48 - .../pbmesh/v2beta1/expose_json.gen.go | 33 - .../pbmesh/v2beta1/grpc_route.pb.binary.go | 78 - proto-public/pbmesh/v2beta1/grpc_route.pb.go | 908 -- proto-public/pbmesh/v2beta1/grpc_route.proto | 145 - .../pbmesh/v2beta1/grpc_route_deepcopy.gen.go | 153 - .../pbmesh/v2beta1/grpc_route_json.gen.go | 88 - .../pbmesh/v2beta1/http_route.pb.binary.go | 118 - proto-public/pbmesh/v2beta1/http_route.pb.go | 1445 --- proto-public/pbmesh/v2beta1/http_route.proto | 263 - .../pbmesh/v2beta1/http_route_deepcopy.gen.go | 237 - .../pbmesh/v2beta1/http_route_json.gen.go | 132 - .../v2beta1/http_route_retries.pb.binary.go | 18 - .../pbmesh/v2beta1/http_route_retries.pb.go | 211 - .../pbmesh/v2beta1/http_route_retries.proto | 26 - .../http_route_retries_deepcopy.gen.go | 27 - .../v2beta1/http_route_retries_json.gen.go | 22 - .../v2beta1/http_route_timeouts.pb.binary.go | 18 - .../pbmesh/v2beta1/http_route_timeouts.pb.go | 191 - 
.../pbmesh/v2beta1/http_route_timeouts.proto | 21 - .../http_route_timeouts_deepcopy.gen.go | 27 - .../v2beta1/http_route_timeouts_json.gen.go | 22 - .../v2beta1/mesh_configuration.pb.binary.go | 18 - .../pbmesh/v2beta1/mesh_configuration.pb.go | 160 - .../pbmesh/v2beta1/mesh_configuration.proto | 14 - .../mesh_configuration_deepcopy.gen.go | 27 - .../v2beta1/mesh_configuration_json.gen.go | 22 - .../pbmesh/v2beta1/mesh_gateway.pb.binary.go | 28 - .../pbmesh/v2beta1/mesh_gateway.pb.go | 289 - .../pbmesh/v2beta1/mesh_gateway.proto | 31 - .../v2beta1/mesh_gateway_deepcopy.gen.go | 48 - .../pbmesh/v2beta1/mesh_gateway_json.gen.go | 33 - .../pbproxystate/access_logs.pb.binary.go | 18 - .../v2beta1/pbproxystate/access_logs.pb.go | 327 - .../v2beta1/pbproxystate/access_logs.proto | 34 - .../pbproxystate/access_logs_deepcopy.gen.go | 27 - .../pbproxystate/access_logs_json.gen.go | 22 - .../v2beta1/pbproxystate/address.pb.binary.go | 28 - .../pbmesh/v2beta1/pbproxystate/address.pb.go | 253 - .../pbmesh/v2beta1/pbproxystate/address.proto | 20 - .../pbproxystate/address_deepcopy.gen.go | 48 - .../v2beta1/pbproxystate/address_json.gen.go | 33 - .../v2beta1/pbproxystate/cluster.pb.binary.go | 268 - .../pbmesh/v2beta1/pbproxystate/cluster.pb.go | 2643 ----- .../pbmesh/v2beta1/pbproxystate/cluster.proto | 197 - .../pbproxystate/cluster_deepcopy.gen.go | 552 - .../v2beta1/pbproxystate/cluster_json.gen.go | 297 - .../v2beta1/pbproxystate/endpoints.pb.go | 387 - .../v2beta1/pbproxystate/endpoints.proto | 31 - .../pbproxystate/endpoints_deepcopy.gen.go | 48 - .../pbproxystate/endpoints_json.gen.go | 33 - .../pbproxystate/escape_hatches.pb.binary.go | 18 - .../v2beta1/pbproxystate/escape_hatches.pb.go | 173 - .../v2beta1/pbproxystate/escape_hatches.proto | 11 - .../escape_hatches_deepcopy.gen.go | 27 - .../pbproxystate/escape_hatches_json.gen.go | 22 - .../header_mutations.pb.binary.go | 68 - .../pbproxystate/header_mutations.pb.go | 700 -- .../pbproxystate/header_mutations.proto | 50 - .../header_mutations_deepcopy.gen.go | 132 - .../pbproxystate/header_mutations_json.gen.go | 77 - .../v2beta1/pbproxystate/intentions.pb.go | 211 - .../v2beta1/pbproxystate/listener.pb.go | 1500 --- .../v2beta1/pbproxystate/listener.proto | 171 - .../pbproxystate/listener_deepcopy.gen.go | 174 - .../v2beta1/pbproxystate/listener_json.gen.go | 99 - .../v2beta1/pbproxystate/protocol.pb.go | 175 - .../v2beta1/pbproxystate/protocol.proto | 19 - .../v2beta1/pbproxystate/protocol_test.go | 22 - .../pbproxystate/references.pb.binary.go | 38 - .../v2beta1/pbproxystate/references.pb.go | 382 - .../v2beta1/pbproxystate/references.proto | 31 - .../pbproxystate/references_deepcopy.gen.go | 69 - .../pbproxystate/references_json.gen.go | 44 - .../v2beta1/pbproxystate/route.pb.binary.go | 168 - .../pbmesh/v2beta1/pbproxystate/route.pb.go | 1830 ---- .../pbmesh/v2beta1/pbproxystate/route.proto | 134 - .../pbproxystate/route_deepcopy.gen.go | 342 - .../v2beta1/pbproxystate/route_json.gen.go | 187 - .../traffic_permissions.pb.binary.go | 78 - .../pbproxystate/traffic_permissions.pb.go | 799 -- .../pbproxystate/traffic_permissions.proto | 64 - .../traffic_permissions_deepcopy.gen.go | 153 - .../traffic_permissions_json.gen.go | 88 - .../transport_socket.pb.binary.go | 138 - .../pbproxystate/transport_socket.pb.go | 1507 --- .../pbproxystate/transport_socket.proto | 141 - .../transport_socket_deepcopy.gen.go | 279 - .../pbproxystate/transport_socket_json.gen.go | 154 - .../pbmesh/v2beta1/proxy_configuration.pb.go | 1227 --- 
.../pbmesh/v2beta1/proxy_configuration.proto | 173 - .../v2beta1/proxy_configuration_addon.go | 14 - .../v2beta1/proxy_configuration_addon_test.go | 53 - .../proxy_configuration_deepcopy.gen.go | 132 - .../v2beta1/proxy_configuration_json.gen.go | 77 - .../pbmesh/v2beta1/proxy_state.pb.binary.go | 28 - proto-public/pbmesh/v2beta1/proxy_state.pb.go | 551 - proto-public/pbmesh/v2beta1/proxy_state.proto | 56 - .../v2beta1/proxy_state_deepcopy.gen.go | 48 - .../pbmesh/v2beta1/proxy_state_json.gen.go | 33 - .../pbmesh/v2beta1/resources.rtypes.go | 127 - proto-public/pbmesh/v2beta1/routing.pb.go | 183 - .../pbmesh/v2beta1/tcp_route.pb.binary.go | 38 - proto-public/pbmesh/v2beta1/tcp_route.pb.go | 362 - proto-public/pbmesh/v2beta1/tcp_route.proto | 56 - .../pbmesh/v2beta1/tcp_route_deepcopy.gen.go | 69 - .../pbmesh/v2beta1/tcp_route_json.gen.go | 44 - proto-public/pbmesh/v2beta1/xroute_addons.go | 91 - .../pbmesh/v2beta1/xroute_addons_test.go | 173 - .../computed_exported_services.pb.binary.go | 38 - .../v2/computed_exported_services.pb.go | 367 - .../v2/computed_exported_services.proto | 28 - ...computed_exported_services_deepcopy.gen.go | 69 - .../v2/computed_exported_services_json.gen.go | 44 - .../v2/exported_services.pb.binary.go | 18 - .../pbmulticluster/v2/exported_services.pb.go | 190 - .../pbmulticluster/v2/exported_services.proto | 16 - .../exported_services_consumer.pb.binary.go | 18 - .../v2/exported_services_consumer.pb.go | 230 - .../v2/exported_services_consumer.proto | 17 - ...exported_services_consumer_deepcopy.gen.go | 27 - .../v2/exported_services_consumer_json.gen.go | 22 - .../v2/exported_services_deepcopy.gen.go | 27 - .../v2/exported_services_json.gen.go | 22 - .../namespace_exported_services.pb.binary.go | 18 - .../v2/namespace_exported_services.pb.go | 182 - .../v2/namespace_exported_services.proto | 15 - ...amespace_exported_services_deepcopy.gen.go | 27 - .../namespace_exported_services_json.gen.go | 22 - .../partition_exported_services.pb.binary.go | 18 - .../v2/partition_exported_services.pb.go | 182 - .../v2/partition_exported_services.proto | 15 - ...artition_exported_services_deepcopy.gen.go | 27 - .../partition_exported_services_json.gen.go | 22 - .../pbmulticluster/v2/resources.rtypes.go | 43 - .../v2beta1/resources.rtypes.go | 22 - .../v2beta1/sameness_group.pb.binary.go | 28 - .../v2beta1/sameness_group.pb.go | 292 - .../v2beta1/sameness_group.proto | 22 - .../v2beta1/sameness_group_deepcopy.gen.go | 48 - .../v2beta1/sameness_group_json.gen.go | 33 - .../pbresource/annotations.pb.binary.go | 18 - proto-public/pbresource/annotations.pb.go | 266 - proto-public/pbresource/annotations.proto | 25 - .../pbresource/annotations_deepcopy.gen.go | 27 - .../pbresource/annotations_json.gen.go | 22 - proto-public/pbresource/cloning_stream.pb.go | 31 - proto-public/pbresource/resource.pb.binary.go | 50 - proto-public/pbresource/resource.pb.go | 1084 +- proto-public/pbresource/resource.proto | 66 +- .../pbresource/resource_cloning_grpc.pb.go | 135 - .../pbresource/resource_deepcopy.gen.go | 573 - proto-public/pbresource/resource_grpc.pb.go | 48 - proto-public/pbresource/resource_json.gen.go | 308 - .../pbserverdiscovery/cloning_stream.pb.go | 31 - .../serverdiscovery_cloning_grpc.pb.go | 58 - .../serverdiscovery_deepcopy.gen.go | 69 - .../serverdiscovery_json.gen.go | 44 - .../pbtenancy/v2beta1/namespace.pb.binary.go | 18 - .../pbtenancy/v2beta1/namespace.pb.go | 174 - .../pbtenancy/v2beta1/namespace.proto | 19 - .../v2beta1/namespace_deepcopy.gen.go | 27 - 
.../pbtenancy/v2beta1/namespace_json.gen.go | 22 - .../pbtenancy/v2beta1/partition.pb.binary.go | 18 - .../pbtenancy/v2beta1/partition.pb.go | 173 - .../pbtenancy/v2beta1/partition.proto | 18 - .../v2beta1/partition_deepcopy.gen.go | 27 - .../pbtenancy/v2beta1/partition_json.gen.go | 22 - .../pbtenancy/v2beta1/resources.rtypes.go | 29 - proto/buf.gen.yaml | 7 +- proto/buf.yaml | 4 +- proto/private/pbacl/acl.go | 2 +- proto/private/pbacl/acl.pb.go | 2 +- proto/private/pbacl/acl.proto | 2 +- proto/private/pbautoconf/auto_config.go | 2 +- proto/private/pbautoconf/auto_config.pb.go | 2 +- proto/private/pbautoconf/auto_config.proto | 2 +- proto/private/pbautoconf/auto_config_ce.go | 3 +- proto/private/pbcommon/common.go | 2 +- proto/private/pbcommon/common.pb.go | 2 +- proto/private/pbcommon/common.proto | 2 +- proto/private/pbcommon/common_ce.go | 3 +- proto/private/pbcommon/convert_pbstruct.go | 2 +- .../private/pbcommon/convert_pbstruct_test.go | 2 +- proto/private/pbconfig/config.pb.go | 2 +- proto/private/pbconfig/config.proto | 2 +- .../private/pbconfigentry/config_entry.gen.go | 290 - proto/private/pbconfigentry/config_entry.go | 34 +- .../pbconfigentry/config_entry.pb.binary.go | 180 - .../private/pbconfigentry/config_entry.pb.go | 5922 ++++------ .../private/pbconfigentry/config_entry.proto | 178 +- .../private/pbconfigentry/config_entry_ce.go | 24 - .../pbconfigentry/config_entry_grpc.pb.go | 103 - proto/private/pbconnect/connect.gen.go | 4 - proto/private/pbconnect/connect.go | 2 +- proto/private/pbconnect/connect.pb.go | 100 +- proto/private/pbconnect/connect.proto | 7 +- proto/private/pbdemo/v1/demo.pb.binary.go | 30 - proto/private/pbdemo/v1/demo.pb.go | 339 +- proto/private/pbdemo/v1/demo.proto | 30 +- proto/private/pbdemo/v1/resources.rtypes.go | 50 - proto/private/pbdemo/v2/demo.pb.go | 123 +- proto/private/pbdemo/v2/demo.proto | 8 +- proto/private/pbdemo/v2/resources.rtypes.go | 29 - proto/private/pboperator/operator.pb.go | 2 +- proto/private/pboperator/operator.proto | 2 +- proto/private/pbpeering/peering.go | 2 +- proto/private/pbpeering/peering.pb.go | 4 +- proto/private/pbpeering/peering.proto | 4 +- proto/private/pbpeering/peering_ce.go | 3 +- proto/private/pbpeerstream/convert.go | 2 +- proto/private/pbpeerstream/peerstream.go | 2 +- proto/private/pbpeerstream/peerstream.pb.go | 2 +- proto/private/pbpeerstream/peerstream.proto | 2 +- proto/private/pbpeerstream/types.go | 2 +- proto/private/pbservice/convert.go | 2 +- proto/private/pbservice/convert_ce.go | 3 +- proto/private/pbservice/convert_ce_test.go | 3 +- proto/private/pbservice/convert_test.go | 2 +- proto/private/pbservice/healthcheck.pb.go | 2 +- proto/private/pbservice/healthcheck.proto | 2 +- proto/private/pbservice/ids.go | 2 +- proto/private/pbservice/ids_test.go | 2 +- proto/private/pbservice/node.pb.go | 2 +- proto/private/pbservice/node.proto | 2 +- proto/private/pbservice/service.pb.go | 2 +- proto/private/pbservice/service.proto | 2 +- proto/private/pbstorage/raft.pb.go | 2 +- proto/private/pbstorage/raft.proto | 2 +- proto/private/pbsubscribe/subscribe.go | 2 +- proto/private/pbsubscribe/subscribe.pb.go | 51 +- proto/private/pbsubscribe/subscribe.proto | 5 +- proto/private/prototest/golden_json.go | 37 - proto/private/prototest/testing.go | 9 +- scan.hcl | 2 +- sdk/LICENSE | 365 - sdk/freeport/ephemeral_darwin.go | 1 + sdk/freeport/ephemeral_darwin_test.go | 1 + sdk/freeport/ephemeral_fallback.go | 1 + sdk/freeport/ephemeral_linux.go | 1 + sdk/freeport/ephemeral_linux_test.go | 1 + 
sdk/freeport/systemlimit.go | 1 + sdk/freeport/systemlimit_windows.go | 1 + sdk/iptables/iptables.go | 32 +- sdk/iptables/iptables_executor_linux.go | 1 + sdk/iptables/iptables_executor_unsupported.go | 1 + sdk/iptables/iptables_test.go | 60 +- sdk/testutil/context.go | 8 +- sdk/testutil/io.go | 3 +- sdk/testutil/retry/counter.go | 26 - sdk/testutil/retry/doc.go | 22 - sdk/testutil/retry/interface.go | 35 - sdk/testutil/retry/output.go | 42 - sdk/testutil/retry/retry.go | 373 +- sdk/testutil/retry/retry_test.go | 125 +- sdk/testutil/retry/retryer.go | 36 - sdk/testutil/retry/run.go | 48 - sdk/testutil/retry/timer.go | 30 - sdk/testutil/server.go | 140 +- sdk/testutil/testlog.go | 6 +- sdk/testutil/types.go | 25 +- sentinel/evaluator.go | 2 +- sentinel/scope.go | 2 +- sentinel/sentinel_ce.go | 3 +- service_os/service.go | 2 +- service_os/service_windows.go | 3 +- snapshot/archive.go | 2 +- snapshot/archive_test.go | 2 +- snapshot/snapshot.go | 2 +- snapshot/snapshot_test.go | 2 +- test-integ/Makefile | 42 - test-integ/README.md | 208 - .../explicit_destinations_l7_test.go | 504 - .../catalogv2/explicit_destinations_test.go | 315 - test-integ/catalogv2/helpers_test.go | 31 - .../catalogv2/implicit_destinations_test.go | 244 - .../catalogv2/traffic_permissions_test.go | 459 - test-integ/connect/snapshot_test.go | 189 - test-integ/go.mod | 119 - test-integ/go.sum | 402 - test-integ/peering_commontopo/README.md | 66 - .../peering_commontopo/ac1_basic_test.go | 272 - .../ac2_disco_chain_test.go | 205 - .../ac3_service_defaults_upstream_test.go | 265 - .../ac4_proxy_defaults_test.go | 215 - .../ac5_1_no_svc_mesh_test.go | 131 - .../ac5_2_pq_failover_test.go | 411 - .../peering_commontopo/ac6_failovers_test.go | 478 - .../ac7_1_rotate_gw_test.go | 195 - .../ac7_2_rotate_leader_test.go | 217 - test-integ/peering_commontopo/commontopo.go | 526 - .../peering_commontopo/sharedtopology_test.go | 85 - test-integ/tenancy/client.go | 154 - test-integ/tenancy/common.go | 84 - test-integ/tenancy/namespace_ce_test.go | 84 - test-integ/topoutil/asserter.go | 441 - test-integ/topoutil/asserter_blankspace.go | 303 - test-integ/topoutil/blankspace.go | 124 - test-integ/topoutil/fixtures.go | 181 - test-integ/topoutil/http2.go | 32 - test-integ/topoutil/http_consul.go | 50 - test-integ/topoutil/naming_shim.go | 41 - test-integ/upgrade/basic/common.go | 255 - .../upgrade/basic/upgrade_basic_test.go | 39 - .../upgrade/l7_traffic_management/common.go | 229 - .../l7_traffic_management/resolver_test.go | 116 - test/bin/cluster.bash | 2 +- test/ca/generate.sh | 2 +- test/client_certs/generate.sh | 2 +- test/hostname/generate.sh | 2 +- .../envoy/Dockerfile-consul-envoy-windows | 12 - .../connect/envoy/Dockerfile-tcpdump-windows | 7 - .../envoy/Dockerfile-test-sds-server-windows | 8 - test/integration/connect/envoy/README.md | 1 - .../integration/connect/envoy/WINDOWS-TEST.md | 40 - .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-api-gateway-http-hostnames/setup.sh | 2 +- .../case-api-gateway-http-hostnames/vars.sh | 2 +- .../case-api-gateway-http-simple/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-api-gateway-http-simple/setup.sh | 2 +- .../case-api-gateway-http-simple/vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../service_s3.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-api-gateway-tcp-conflicted/setup.sh | 2 +- 
.../case-api-gateway-tcp-conflicted/vars.sh | 2 +- .../case-api-gateway-tcp-simple/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-api-gateway-tcp-simple/setup.sh | 2 +- .../envoy/case-api-gateway-tcp-simple/vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../connect/envoy/case-badauthz/capture.sh | 2 +- .../connect/envoy/case-badauthz/setup.sh | 2 +- .../connect/envoy/case-basic/capture.sh | 2 +- .../connect/envoy/case-basic/setup.sh | 2 +- .../connect/envoy/case-centralconf/capture.sh | 2 +- .../envoy/case-centralconf/service_s1.hcl | 2 +- .../envoy/case-centralconf/service_s2.hcl | 2 +- .../connect/envoy/case-centralconf/setup.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../vars.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/setup.sh | 2 +- .../secondary/join.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../secondary/service_s1.hcl | 2 +- .../secondary/setup.sh | 2 +- .../vars.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/setup.sh | 2 +- .../secondary/join.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../secondary/service_s1.hcl | 2 +- .../secondary/setup.sh | 2 +- .../vars.sh | 2 +- .../service_s2-v1.hcl | 2 +- .../service_s2-v2.hcl | 2 +- .../case-cfg-resolver-defaultsubset/setup.sh | 2 +- .../case-cfg-resolver-defaultsubset/vars.sh | 2 +- .../case-cfg-resolver-features/capture.sh | 2 +- .../service_s2-v1.hcl | 2 +- .../service_s2-v2.hcl | 2 +- .../envoy/case-cfg-resolver-features/setup.sh | 2 +- .../envoy/case-cfg-resolver-features/vars.sh | 2 +- .../service_s2-v1.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../service_s3-v1.hcl | 2 +- .../service_s3-v2.hcl | 2 +- .../service_s3.hcl | 2 +- .../setup.sh | 2 +- .../case-cfg-resolver-subset-redirect/vars.sh | 2 +- .../service_s3-v1.hcl | 2 +- .../service_s3-v2.hcl | 2 +- .../service_s3.hcl | 2 +- .../case-cfg-resolver-svc-failover/setup.sh | 2 +- .../case-cfg-resolver-svc-failover/vars.sh | 2 +- .../service_s3.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../service_s3.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../envoy/case-cfg-router-features/capture.sh | 2 +- .../service_s2-v1.hcl | 2 +- .../service_s2-v2.hcl | 2 +- .../envoy/case-cfg-router-features/setup.sh | 13 +- .../envoy/case-cfg-router-features/vars.sh | 2 +- .../case-cfg-router-features/verify.bats | 5 - .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../case-cfg-splitter-cluster-peering/vars.sh | 2 +- .../case-cfg-splitter-features/capture.sh | 2 +- .../service_s2-v1.hcl | 2 +- .../service_s2-v2.hcl | 2 +- .../envoy/case-cfg-splitter-features/setup.sh | 2 +- .../envoy/case-cfg-splitter-features/vars.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_ingress.hcl | 2 +- .../primary/setup.sh | 2 +- 
.../vars.sh | 2 +- .../connect/envoy/case-consul-exec/setup.sh | 2 +- .../connect/envoy/case-consul-exec/vars.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../case-cross-peer-control-plane-mgw/vars.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/service_s3.hcl | 2 +- .../alpha/setup.sh | 2 +- .../case-cross-peers-http-router/bind.hcl | 2 +- .../case-cross-peers-http-router/capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../case-cross-peers-http-router/vars.sh | 2 +- .../case-cross-peers-http/alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../case-cross-peers-http/alpha/setup.sh | 2 +- .../envoy/case-cross-peers-http/bind.hcl | 2 +- .../envoy/case-cross-peers-http/capture.sh | 2 +- .../case-cross-peers-http/primary/base.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../case-cross-peers-http/primary/setup.sh | 2 +- .../envoy/case-cross-peers-http/vars.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/service_s3.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../vars.sh | 2 +- .../envoy/case-cross-peers/alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../case-cross-peers/alpha/service_s1.hcl | 2 +- .../case-cross-peers/alpha/service_s2.hcl | 2 +- .../envoy/case-cross-peers/alpha/setup.sh | 2 +- .../connect/envoy/case-cross-peers/bind.hcl | 2 +- .../connect/envoy/case-cross-peers/capture.sh | 2 +- .../envoy/case-cross-peers/primary/base.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../case-cross-peers/primary/service_s1.hcl | 2 +- .../case-cross-peers/primary/service_s2.hcl | 2 +- .../envoy/case-cross-peers/primary/setup.sh | 2 +- .../connect/envoy/case-cross-peers/vars.sh | 2 +- .../envoy/case-dogstatsd-udp/service_s1.hcl | 2 +- .../connect/envoy/case-dogstatsd-udp/setup.sh | 2 +- .../connect/envoy/case-dogstatsd-udp/vars.sh | 2 +- .../envoy/case-dogstatsd-udp/verify.bats | 9 +- .../envoy/case-expose-checks/capture.sh | 2 +- .../envoy/case-expose-checks/service_s1.hcl | 2 +- .../envoy/case-expose-checks/service_s2.hcl | 2 +- .../connect/envoy/case-expose-checks/setup.sh | 2 +- .../case-gateway-without-services/bind.hcl | 2 +- .../case-gateway-without-services/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../service_s1.hcl | 2 +- .../service_s2.hcl | 2 +- .../case-gateway-without-services/setup.sh | 2 +- .../case-gateway-without-services/vars.sh | 2 +- .../envoy/case-gateways-local/bind.hcl | 2 +- .../envoy/case-gateways-local/capture.sh | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../case-gateways-local/primary/setup.sh | 2 +- 
.../case-gateways-local/secondary/join.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../secondary/service_s1.hcl | 2 +- .../case-gateways-local/secondary/setup.sh | 4 +- .../connect/envoy/case-gateways-local/vars.sh | 2 +- .../envoy/case-gateways-remote/bind.hcl | 2 +- .../envoy/case-gateways-remote/capture.sh | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../case-gateways-remote/primary/setup.sh | 2 +- .../case-gateways-remote/secondary/join.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../secondary/service_s1.hcl | 2 +- .../case-gateways-remote/secondary/setup.sh | 2 +- .../envoy/case-gateways-remote/vars.sh | 2 +- .../connect/envoy/case-grpc/service_s1.hcl | 4 +- .../connect/envoy/case-grpc/service_s2.hcl | 2 +- .../connect/envoy/case-grpc/setup.sh | 2 +- .../connect/envoy/case-grpc/vars.sh | 2 +- .../connect/envoy/case-grpc/verify.bats | 2 +- .../envoy/case-http-badauthz/capture.sh | 2 +- .../envoy/case-http-badauthz/service_s1.hcl | 2 +- .../envoy/case-http-badauthz/service_s2.hcl | 2 +- .../connect/envoy/case-http-badauthz/setup.sh | 6 +- .../connect/envoy/case-http/capture.sh | 2 +- .../connect/envoy/case-http/service_s1.hcl | 2 +- .../connect/envoy/case-http/service_s2.hcl | 2 +- .../connect/envoy/case-http/setup.sh | 2 +- .../case-ingress-gateway-grpc/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-ingress-gateway-grpc/service_s1.hcl | 2 +- .../envoy/case-ingress-gateway-grpc/setup.sh | 2 +- .../envoy/case-ingress-gateway-grpc/vars.sh | 2 +- .../case-ingress-gateway-http/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../envoy/case-ingress-gateway-http/setup.sh | 2 +- .../envoy/case-ingress-gateway-http/vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../alpha/base.hcl | 2 +- .../alpha/service_gateway.hcl | 2 +- .../alpha/service_s1.hcl | 2 +- .../alpha/service_s2.hcl | 2 +- .../alpha/setup.sh | 2 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/base.hcl | 2 +- .../primary/service_ingress.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../vars.sh | 2 +- .../envoy/case-ingress-gateway-sds/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../envoy/case-ingress-gateway-sds/setup.sh | 2 +- .../envoy/case-ingress-gateway-sds/vars.sh | 2 +- .../case-ingress-gateway-simple/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-ingress-gateway-simple/setup.sh | 2 +- .../envoy/case-ingress-gateway-simple/vars.sh | 2 +- .../envoy/case-ingress-gateway-tls/capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../envoy/case-ingress-gateway-tls/setup.sh | 2 +- .../envoy/case-ingress-gateway-tls/vars.sh | 2 +- .../case-ingress-gateway-tls/verify.bats | 10 +- .../bind.hcl | 2 +- .../capture.sh | 2 +- .../primary/service_gateway.hcl | 2 +- .../primary/service_ingress.hcl | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../primary/setup.sh | 2 +- .../secondary/join.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../secondary/setup.sh | 2 +- .../vars.sh | 2 +- .../connect/envoy/case-l7-intentions/acl.hcl | 2 +- .../envoy/case-l7-intentions/capture.sh | 2 +- .../connect/envoy/case-l7-intentions/setup.sh | 2 +- .../connect/envoy/case-lua/capture.sh | 2 +- .../connect/envoy/case-lua/service_s1.hcl | 2 +- .../connect/envoy/case-lua/service_s2.hcl | 2 +- .../connect/envoy/case-lua/setup.sh | 2 +- .../connect/envoy/case-lua/vars.sh | 2 +- .../envoy/case-mesh-to-lambda/capture.sh | 2 +- 
.../case-mesh-to-lambda/service_gateway.hcl | 2 +- .../envoy/case-mesh-to-lambda/service_s1.hcl | 2 +- .../envoy/case-mesh-to-lambda/setup.sh | 2 +- .../connect/envoy/case-mesh-to-lambda/vars.sh | 2 +- .../envoy/case-multidc-rsa-ca/bind.hcl | 2 +- .../envoy/case-multidc-rsa-ca/ca_config.hcl | 2 +- .../envoy/case-multidc-rsa-ca/capture.sh | 2 +- .../primary/service_s1.hcl | 2 +- .../primary/service_s2.hcl | 2 +- .../case-multidc-rsa-ca/primary/setup.sh | 2 +- .../case-multidc-rsa-ca/secondary/join.hcl | 2 +- .../secondary/service_s1.hcl | 2 +- .../case-multidc-rsa-ca/secondary/setup.sh | 2 +- .../connect/envoy/case-multidc-rsa-ca/vars.sh | 2 +- .../connect/envoy/case-prometheus/capture.sh | 2 +- .../envoy/case-prometheus/service_s1.hcl | 2 +- .../envoy/case-prometheus/service_s2.hcl | 2 +- .../connect/envoy/case-prometheus/setup.sh | 2 +- .../envoy/case-property-override/capture.sh | 2 +- .../case-property-override/service_s1.hcl | 2 +- .../case-property-override/service_s2.hcl | 2 +- .../case-property-override/service_s3.hcl | 2 +- .../envoy/case-property-override/setup.sh | 2 +- .../envoy/case-property-override/vars.sh | 2 +- .../envoy/case-stats-proxy/service_s1.hcl | 2 +- .../envoy/case-stats-proxy/service_s2.hcl | 2 +- .../connect/envoy/case-stats-proxy/setup.sh | 2 +- .../envoy/case-stats-proxy/verify.bats | 2 +- .../envoy/case-statsd-udp/service_s1.hcl | 2 +- .../connect/envoy/case-statsd-udp/setup.sh | 2 +- .../connect/envoy/case-statsd-udp/vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../service_s1.hcl | 2 +- .../service_s4.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../service_gateway.hcl | 2 +- .../case-terminating-gateway-simple/setup.sh | 2 +- .../case-terminating-gateway-simple/vars.sh | 2 +- .../capture.sh | 2 +- .../service_gateway.hcl | 2 +- .../service_s2-v1.hcl | 2 +- .../service_s2-v2.hcl | 2 +- .../service_s3.hcl | 2 +- .../case-terminating-gateway-subsets/setup.sh | 2 +- .../case-terminating-gateway-subsets/vars.sh | 2 +- .../bind.hcl | 2 +- .../service_gateway.hcl | 2 +- .../service_s1.hcl | 2 +- .../service_s2.hcl | 2 +- .../setup.sh | 2 +- .../vars.sh | 2 +- .../envoy/case-upstream-config/service_s1.hcl | 2 +- .../envoy/case-upstream-config/service_s2.hcl | 2 +- .../envoy/case-upstream-config/setup.sh | 2 +- .../connect/envoy/case-wanfed-gw/bind.hcl | 2 +- .../connect/envoy/case-wanfed-gw/capture.sh | 2 +- .../case-wanfed-gw/global-setup-windows.sh | 47 - .../envoy/case-wanfed-gw/global-setup.sh | 2 +- .../envoy/case-wanfed-gw/primary/common.hcl | 2 +- .../envoy/case-wanfed-gw/primary/server.hcl | 2 +- .../primary/service_gateway.hcl | 2 +- .../case-wanfed-gw/primary/service_s1.hcl | 2 +- .../case-wanfed-gw/primary/service_s2.hcl | 2 +- .../envoy/case-wanfed-gw/primary/setup.sh | 2 +- .../envoy/case-wanfed-gw/secondary/common.hcl | 2 +- .../envoy/case-wanfed-gw/secondary/server.hcl | 2 +- .../secondary/service_gateway.hcl | 2 +- .../case-wanfed-gw/secondary/service_s1.hcl | 2 +- .../case-wanfed-gw/secondary/service_s2.hcl | 2 +- .../envoy/case-wanfed-gw/secondary/setup.sh | 2 +- .../connect/envoy/case-wanfed-gw/vars.sh | 2 +- .../connect/envoy/case-wasm/capture.sh | 2 +- .../connect/envoy/case-wasm/service_s1.hcl | 2 +- .../connect/envoy/case-wasm/service_s2.hcl | 2 +- .../connect/envoy/case-wasm/setup.sh | 2 +- .../connect/envoy/case-wasm/vars.sh | 2 +- .../connect/envoy/case-zipkin/service_s1.hcl | 2 +- .../connect/envoy/case-zipkin/service_s2.hcl | 2 +- .../connect/envoy/case-zipkin/setup.sh | 2 +- 
.../connect/envoy/case-zipkin/vars.sh | 2 +- .../connect/envoy/case-zipkin/verify.bats | 7 +- .../connect/envoy/consul-base-cfg/base.hcl | 2 +- .../envoy/consul-base-cfg/service_s1.hcl | 2 +- .../envoy/consul-base-cfg/service_s2.hcl | 2 +- test/integration/connect/envoy/defaults.sh | 2 +- .../connect/envoy/docker-windows.md | 42 - .../connect/envoy/docs/img/linux-arch.png | Bin 63964 -> 0 bytes .../docs/img/windows-arch-singlecontainer.png | Bin 114040 -> 0 bytes .../envoy/docs/img/windows-linux-arch.png | Bin 61475 -> 0 bytes .../docs/windows-testing-architecture.md | 106 - test/integration/connect/envoy/down.sh | 2 +- test/integration/connect/envoy/helpers.bash | 19 +- .../connect/envoy/helpers.windows.bash | 1195 --- test/integration/connect/envoy/main_test.go | 141 +- test/integration/connect/envoy/run-tests.sh | 16 +- .../connect/envoy/run-tests.windows.sh | 916 -- .../connect/envoy/test-sds-server/Dockerfile | 2 +- .../envoy/test-sds-server/certs/gen-certs.sh | 2 +- .../connect/envoy/test-sds-server/sds.go | 2 +- .../connect/envoy/windows-troubleshooting.md | 90 - .../assets/Dockerfile-consul-dataplane | 31 - test/integration/consul-container/go.mod | 69 +- test/integration/consul-container/go.sum | 472 +- .../consul-container/libs/assert/common.go | 2 +- .../consul-container/libs/assert/envoy.go | 74 +- .../consul-container/libs/assert/grpc.go | 3 +- .../consul-container/libs/assert/peering.go | 16 +- .../consul-container/libs/assert/service.go | 99 +- .../consul-container/libs/cluster/agent.go | 77 +- .../consul-container/libs/cluster/app.go | 2 +- .../consul-container/libs/cluster/builder.go | 7 +- .../consul-container/libs/cluster/cluster.go | 28 +- .../consul-container/libs/cluster/config.go | 2 +- .../libs/cluster/container.go | 99 +- .../libs/cluster/dataplane.go | 171 - .../libs/cluster/encryption.go | 140 +- .../consul-container/libs/cluster/log.go | 2 +- .../consul-container/libs/cluster/network.go | 3 +- .../consul-container/libs/service/common.go | 4 +- .../consul-container/libs/service/connect.go | 22 +- .../consul-container/libs/service/examples.go | 40 +- .../consul-container/libs/service/gateway.go | 7 +- .../consul-container/libs/service/helpers.go | 122 +- .../consul-container/libs/service/log.go | 2 +- .../consul-container/libs/service/service.go | 2 +- .../libs/topology/peering_topology.go | 33 +- .../libs/topology/service_topology.go | 14 +- .../consul-container/libs/utils/debug.go | 2 +- .../consul-container/libs/utils/defer.go | 2 +- .../consul-container/libs/utils/docker.go | 17 +- .../consul-container/libs/utils/helpers.go | 2 +- .../consul-container/libs/utils/retry.go | 2 +- .../consul-container/libs/utils/tenancy.go | 14 +- .../consul-container/libs/utils/utils.go | 2 +- .../consul-container/libs/utils/version.go | 42 +- .../consul-container/libs/utils/version_ce.go | 3 +- .../test/basic/connect_service_test.go | 57 +- .../test/catalog/catalog_test.go | 43 - .../consul_envoy_version.go | 2 +- .../consul-container/test/debugging.md | 78 - .../test/envoy_extensions/ext_authz_test.go | 21 +- .../otel_access_logging_test.go | 135 - .../testdata/otel/config.yaml | 30 - .../testdata/wasm_test_files/Dockerfile | 6 - .../testdata/wasm_test_files/README.md | 14 - .../testdata/wasm_test_files/build.sh | 6 - .../testdata/wasm_test_files/go.mod | 5 - .../testdata/wasm_test_files/go.sum | 6 - .../testdata/wasm_test_files/nginx.conf | 13 - .../wasm_test_files/wasm_add_header.go | 50 - .../wasm_test_files/wasm_add_header.wasm | Bin 400008 -> 0 bytes 
.../test/envoy_extensions/wasm_test.go | 464 - .../test/gateways/gateway_endpoint_test.go | 27 +- .../test/gateways/http_route_test.go | 263 +- .../test/gateways/ingress_gateway_test.go | 4 +- .../test/gateways/tenancy_ce.go | 1 + .../test/gateways/terminating_gateway_test.go | 189 - .../test/jwtauth/jwt_auth_test.go | 2 +- .../test/observability/access_logs_test.go | 4 +- .../test/observability/metrics_leader_test.go | 2 +- .../rotate_server_and_ca_then_fail_test.go | 2 +- .../test/ratelimit/ratelimit_test.go | 210 +- .../test/resource/grpc_forwarding_test.go | 188 - .../resource/http_api/acl_enabled_test.go | 223 - .../test/resource/http_api/client/client.go | 312 - .../test/resource/http_api/helper.go | 213 - .../test/snapshot/snapshot_restore_test.go | 2 +- .../test/tproxy/tproxy_test.go | 6 +- .../test/trafficpermissions/tcp_test.go | 555 - .../test/troubleshoot/troubleshoot_test.go | 4 +- .../test/upgrade/acl_node_test.go | 2 +- .../test/upgrade/basic/basic_test.go | 2 +- .../upgrade/basic/fullstopupgrade_test.go | 100 + .../test/upgrade/basic/healthcheck_test.go | 2 +- .../test/upgrade/catalog/catalog_test.go | 88 - .../consul-container/test/upgrade/common.go | 2 +- .../test/upgrade/ingress_gateway_grpc_test.go | 2 +- .../test/upgrade/ingress_gateway_sds_test.go | 2 +- .../test/upgrade/ingress_gateway_test.go | 2 +- .../resolver_default_subset_test.go | 6 +- .../peering/peering_control_plane_mgw_test.go | 2 +- .../test/upgrade/peering/peering_http_test.go | 2 +- .../test/util/test_debug_breakpoint_hit.png | Bin 654866 -> 0 bytes .../test/util/test_debug_configuration.png | Bin 294046 -> 0 bytes .../test/util/test_debug_info.png | Bin 622325 -> 0 bytes .../util/test_debug_remote_configuration.png | Bin 285715 -> 0 bytes .../test/util/test_debug_remote_connected.png | Bin 111968 -> 0 bytes .../test/util/test_debug_resume_program.png | Bin 51972 -> 0 bytes .../wanfed/acl_bootstrap_replication_test.go | 2 +- .../test/wanfed/wanfed_peering_test.go | 2 +- test/load/packer/consul-ami/consul.pkr.hcl | 2 +- test/load/packer/consul-ami/scripts/conf.yaml | 2 +- .../packer/consul-ami/scripts/datadog.yaml | 2 +- .../packer/consul-ami/scripts/move-files.sh | 2 +- .../load/packer/loadtest-ami/loadtest.pkr.hcl | 2 +- .../packer/loadtest-ami/scripts/install-k6.sh | 2 +- .../packer/loadtest-ami/scripts/loadtest.js | 2 +- test/load/terraform/consul.tf | 2 +- test/load/terraform/main.tf | 2 +- test/load/terraform/outputs.tf | 2 +- test/load/terraform/providers.tf | 2 +- test/load/terraform/test-servers.tf | 2 +- test/load/terraform/user-data-client.sh | 2 +- test/load/terraform/user-data-server.sh | 2 +- test/load/terraform/variables.tf | 2 +- testing/deployer/.gitignore | 4 - testing/deployer/README.md | 179 - testing/deployer/TODO.md | 9 - testing/deployer/go.mod | 68 - testing/deployer/go.sum | 318 - testing/deployer/sprawl/acl.go | 387 - testing/deployer/sprawl/acl_rules.go | 207 - testing/deployer/sprawl/boot.go | 605 -- testing/deployer/sprawl/catalog.go | 990 -- testing/deployer/sprawl/configentries.go | 61 - testing/deployer/sprawl/consul.go | 119 - testing/deployer/sprawl/debug.go | 11 - testing/deployer/sprawl/details.go | 199 - testing/deployer/sprawl/ent.go | 177 - testing/deployer/sprawl/grpc.go | 42 - testing/deployer/sprawl/helpers.go | 14 - .../deployer/sprawl/internal/build/docker.go | 172 - .../deployer/sprawl/internal/runner/exec.go | 123 - .../deployer/sprawl/internal/secrets/store.go | 84 - .../deployer/sprawl/internal/tfgen/agent.go | 296 - 
.../deployer/sprawl/internal/tfgen/digest.go | 48 - testing/deployer/sprawl/internal/tfgen/dns.go | 263 - .../deployer/sprawl/internal/tfgen/docker.go | 45 - .../sprawl/internal/tfgen/docker_test.go | 17 - testing/deployer/sprawl/internal/tfgen/gen.go | 481 - testing/deployer/sprawl/internal/tfgen/io.go | 73 - .../deployer/sprawl/internal/tfgen/nodes.go | 163 - .../deployer/sprawl/internal/tfgen/prelude.go | 19 - .../deployer/sprawl/internal/tfgen/proxy.go | 87 - testing/deployer/sprawl/internal/tfgen/res.go | 98 - .../templates/container-app-dataplane.tf.tmpl | 65 - .../templates/container-app-sidecar.tf.tmpl | 37 - .../tfgen/templates/container-app.tf.tmpl | 25 - .../tfgen/templates/container-consul.tf.tmpl | 37 - .../tfgen/templates/container-coredns.tf.tmpl | 28 - .../templates/container-mgw-dataplane.tf.tmpl | 45 - .../tfgen/templates/container-mgw.tf.tmpl | 39 - .../tfgen/templates/container-pause.tf.tmpl | 38 - .../tfgen/templates/container-proxy.tf.tmpl | 33 - .../deployer/sprawl/internal/tfgen/tfgen.go | 32 - testing/deployer/sprawl/network_area_ce.go | 14 - testing/deployer/sprawl/peering.go | 242 - testing/deployer/sprawl/resources.go | 22 - testing/deployer/sprawl/sprawl.go | 765 -- .../deployer/sprawl/sprawltest/sprawltest.go | 218 - .../deployer/sprawl/sprawltest/test_test.go | 351 - testing/deployer/sprawl/tls.go | 143 - testing/deployer/topology/compile.go | 962 -- testing/deployer/topology/default_versions.go | 13 - testing/deployer/topology/ids.go | 128 - testing/deployer/topology/images.go | 154 - testing/deployer/topology/images_test.go | 101 - testing/deployer/topology/naming_shim.go | 43 - testing/deployer/topology/relationships.go | 97 - testing/deployer/topology/topology.go | 1107 -- testing/deployer/topology/util.go | 20 - testing/deployer/topology/util_test.go | 14 - testing/deployer/update-latest-versions.sh | 59 - testing/deployer/util/consul.go | 114 - testing/deployer/util/files.go | 60 - .../deployer/util/internal/ipamutils/doc.go | 21 - .../deployer/util/internal/ipamutils/utils.go | 120 - .../util/internal/ipamutils/utils_test.go | 105 - testing/deployer/util/net.go | 20 - testing/deployer/util/v2.go | 92 - testing/deployer/util/v2_decode.go | 91 - testrpc/wait.go | 34 +- tlsutil/config.go | 27 +- tlsutil/config_test.go | 11 +- tlsutil/generate.go | 2 +- tlsutil/generate_test.go | 2 +- tlsutil/mock.go | 2 +- tools/internal-grpc-proxy/main.go | 2 +- troubleshoot/go.mod | 15 +- troubleshoot/go.sum | 8 +- troubleshoot/ports/troubleshoot_ports_test.go | 2 +- troubleshoot/proxy/certs.go | 2 +- troubleshoot/proxy/certs_test.go | 2 +- troubleshoot/proxy/stats.go | 2 +- troubleshoot/proxy/troubleshoot_proxy.go | 2 +- troubleshoot/proxy/upstreams.go | 2 +- troubleshoot/proxy/upstreams_test.go | 2 +- troubleshoot/proxy/utils.go | 2 +- troubleshoot/proxy/validateupstream.go | 2 +- troubleshoot/proxy/validateupstream_test.go | 2 +- troubleshoot/validate/validate.go | 2 +- troubleshoot/validate/validate_test.go | 2 +- types/area.go | 2 +- types/checks.go | 2 +- types/node_id.go | 2 +- types/tls.go | 2 +- types/tls_test.go | 2 +- ui/.nvmrc | 2 +- ui/package.json | 4 +- .../components/consul/acl/selector/index.hbs | 130 +- .../consul/token/selector/README.mdx | 2 +- .../consul/token/selector/index.hbs | 309 +- .../components/consul/token/selector/index.js | 2 +- .../consul-acls/vendor/consul-acls/routes.js | 2 +- .../vendor/consul-acls/services.js | 2 +- .../app/components/consul/hcp/home/index.hbs | 15 + .../app/components/consul/hcp/home/index.scss | 16 + 
.../components/consul/hcp/home/index.test.js | 73 + ui/packages/consul-hcp/package.json | 5 + .../consul-hcp/vendor/consul-hcp/routes.js | 14 + .../consul-hcp/vendor/consul-hcp/services.js | 14 + .../consul/lock-session/form/index.hbs | 2 +- .../consul/lock-session/form/index.scss | 2 +- .../consul/lock-session/list/index.hbs | 6 +- .../consul/lock-session/list/index.scss | 2 +- .../lock-session/notifications/index.hbs | 2 +- .../app/templates/dc/nodes/show/sessions.hbs | 2 +- .../vendor/consul-lock-sessions/routes.js | 2 +- .../vendor/consul-lock-sessions/services.js | 2 +- .../components/consul/nspace/form/index.hbs | 2 +- .../components/consul/nspace/form/index.js | 2 +- .../components/consul/nspace/list/index.hbs | 2 +- .../consul/nspace/list/pageobject.js | 2 +- .../consul/nspace/notifications/index.hbs | 2 +- .../consul/nspace/search-bar/index.hbs | 2 +- .../consul/nspace/selector/index.hbs | 147 +- .../app/templates/dc/nspaces/edit.hbs | 2 +- .../app/templates/dc/nspaces/index.hbs | 2 +- .../vendor/consul-nspaces/routes.js | 2 +- .../vendor/consul-nspaces/services.js | 2 +- .../consul/partition/form/index.hbs | 2 +- .../consul/partition/list/index.hbs | 2 +- .../consul/partition/list/test-support.js | 2 +- .../consul/partition/notifications/index.hbs | 2 +- .../consul/partition/search-bar/index.hbs | 2 +- .../consul/partition/selector/index.hbs | 112 +- .../app/templates/dc/partitions/edit.hbs | 2 +- .../app/templates/dc/partitions/index.hbs | 2 +- .../vendor/consul-partitions/routes.js | 2 +- .../vendor/consul-partitions/services.js | 2 +- .../consul/peer/address/list/index.hbs | 4 +- .../consul/peer/address/list/index.scss | 2 +- .../consul/peer/bento-box/index.hbs | 2 +- .../components/consul/peer/components.scss | 2 +- .../consul/peer/form/chart.xstate.js | 2 +- .../peer/form/generate/actions/index.hbs | 2 +- .../consul/peer/form/generate/chart.xstate.js | 2 +- .../peer/form/generate/fieldsets/index.hbs | 2 +- .../peer/form/generate/fieldsets/index.js | 2 +- .../consul/peer/form/generate/index.hbs | 2 +- .../app/components/consul/peer/form/index.hbs | 2 +- .../components/consul/peer/form/index.scss | 2 +- .../peer/form/initiate/actions/index.hbs | 2 +- .../peer/form/initiate/fieldsets/index.hbs | 2 +- .../consul/peer/form/initiate/index.hbs | 2 +- .../consul/peer/form/token/actions/index.hbs | 2 +- .../peer/form/token/fieldsets/index.hbs | 4 +- .../app/components/consul/peer/index.scss | 2 +- .../app/components/consul/peer/list/index.hbs | 2 +- .../consul/peer/list/test-support.js | 2 +- .../consul/peer/notifications/index.hbs | 2 +- .../consul/peer/search-bar/index.hbs | 2 +- .../consul/peer/search-bar/index.scss | 2 +- .../components/consul/peer/selector/index.hbs | 33 +- .../app/controllers/dc/peers/index.js | 2 +- .../app/controllers/dc/peers/show/exported.js | 2 +- .../app/controllers/dc/peers/show/index.js | 2 +- .../app/templates/dc/peers/index.hbs | 2 +- .../app/templates/dc/peers/show.hbs | 2 +- .../app/templates/dc/peers/show/addresses.hbs | 2 +- .../app/templates/dc/peers/show/exported.hbs | 2 +- .../app/templates/dc/peers/show/imported.hbs | 3 +- .../app/templates/dc/peers/show/index.hbs | 2 +- .../vendor/consul-peerings/routes.js | 2 +- .../vendor/consul-peerings/services.js | 2 +- ui/packages/consul-ui/.docfy-config.js | 2 +- ui/packages/consul-ui/.eslintrc.js | 2 +- ui/packages/consul-ui/.istanbul.yml | 2 +- ui/packages/consul-ui/.prettierrc.js | 2 +- ui/packages/consul-ui/.template-lintrc.js | 2 +- ui/packages/consul-ui/README.md | 2 +- 
ui/packages/consul-ui/app/abilities/acl.js | 2 +- .../consul-ui/app/abilities/auth-method.js | 2 +- ui/packages/consul-ui/app/abilities/base.js | 2 +- .../consul-ui/app/abilities/intention.js | 2 +- ui/packages/consul-ui/app/abilities/kv.js | 2 +- .../consul-ui/app/abilities/license.js | 2 +- ui/packages/consul-ui/app/abilities/node.js | 2 +- ui/packages/consul-ui/app/abilities/nspace.js | 2 +- .../consul-ui/app/abilities/operator.js | 10 - .../consul-ui/app/abilities/overview.js | 2 +- .../consul-ui/app/abilities/partition.js | 2 +- ui/packages/consul-ui/app/abilities/peer.js | 2 +- .../consul-ui/app/abilities/permission.js | 2 +- ui/packages/consul-ui/app/abilities/policy.js | 2 +- ui/packages/consul-ui/app/abilities/role.js | 2 +- ui/packages/consul-ui/app/abilities/server.js | 2 +- .../app/abilities/service-instance.js | 2 +- .../consul-ui/app/abilities/session.js | 2 +- ui/packages/consul-ui/app/abilities/token.js | 2 +- .../consul-ui/app/abilities/upstream.js | 2 +- .../consul-ui/app/abilities/zervice.js | 2 +- ui/packages/consul-ui/app/abilities/zone.js | 2 +- .../consul-ui/app/adapters/application.js | 2 +- .../consul-ui/app/adapters/auth-method.js | 2 +- .../consul-ui/app/adapters/binding-rule.js | 2 +- .../consul-ui/app/adapters/coordinate.js | 2 +- .../consul-ui/app/adapters/discovery-chain.js | 2 +- ui/packages/consul-ui/app/adapters/http.js | 2 +- .../consul-ui/app/adapters/intention.js | 2 +- ui/packages/consul-ui/app/adapters/kv.js | 2 +- ui/packages/consul-ui/app/adapters/node.js | 2 +- ui/packages/consul-ui/app/adapters/nspace.js | 2 +- .../consul-ui/app/adapters/oidc-provider.js | 2 +- .../consul-ui/app/adapters/partition.js | 2 +- .../consul-ui/app/adapters/permission.js | 2 +- ui/packages/consul-ui/app/adapters/policy.js | 2 +- ui/packages/consul-ui/app/adapters/proxy.js | 2 +- ui/packages/consul-ui/app/adapters/role.js | 2 +- .../app/adapters/service-instance.js | 2 +- ui/packages/consul-ui/app/adapters/service.js | 2 +- ui/packages/consul-ui/app/adapters/session.js | 2 +- ui/packages/consul-ui/app/adapters/token.js | 2 +- .../consul-ui/app/adapters/topology.js | 2 +- ui/packages/consul-ui/app/app.js | 2 +- .../consul-ui/app/components/action/index.hbs | 2 +- .../app/components/anchors/index.scss | 2 +- .../app/components/anchors/skin.scss | 2 +- .../app/components/anonymous/index.hbs | 2 +- .../app/components/anonymous/index.js | 2 +- .../app/components/app-error/index.hbs | 2 +- .../app/components/app-view/index.hbs | 2 +- .../app/components/app-view/index.js | 2 +- .../app/components/app-view/index.scss | 2 +- .../app/components/app-view/layout.scss | 2 +- .../app/components/app-view/skin.scss | 2 +- .../consul-ui/app/components/app/index.hbs | 107 +- .../consul-ui/app/components/app/index.js | 2 +- .../consul-ui/app/components/app/index.scss | 127 +- .../app/components/app/notification/index.hbs | 2 +- .../app/components/aria-menu/index.hbs | 2 +- .../app/components/aria-menu/index.js | 2 +- .../app/components/auth-dialog/README.mdx | 3 + .../components/auth-dialog/chart.xstate.js | 2 +- .../app/components/auth-dialog/index.hbs | 2 +- .../app/components/auth-dialog/index.js | 2 +- .../app/components/auth-form/chart.xstate.js | 2 +- .../app/components/auth-form/index.hbs | 2 +- .../app/components/auth-form/index.js | 2 +- .../app/components/auth-form/index.scss | 2 +- .../app/components/auth-form/layout.scss | 2 +- .../app/components/auth-form/pageobject.js | 2 +- .../app/components/auth-form/skin.scss | 2 +- .../app/components/auth-form/tabs.xstate.js | 2 +- 
.../app/components/auth-modal/index.scss | 2 +- .../app/components/auth-modal/layout.scss | 2 +- .../app/components/auth-modal/skin.scss | 2 +- .../app/components/auth-profile/README.mdx | 24 + .../app/components/auth-profile/index.hbs | 17 + .../app/components/auth-profile/index.scss | 24 + .../consul-ui/app/components/badge/debug.scss | 2 +- .../consul-ui/app/components/badge/index.scss | 2 +- .../app/components/brand-loader/index.scss | 2 +- .../app/components/brand-loader/layout.scss | 2 +- .../app/components/brand-loader/skin.scss | 2 +- .../app/components/breadcrumbs/index.scss | 2 +- .../app/components/breadcrumbs/layout.scss | 2 +- .../app/components/breadcrumbs/skin.scss | 2 +- .../app/components/buttons/index.scss | 2 +- .../app/components/buttons/layout.scss | 2 +- .../app/components/buttons/skin.scss | 2 +- .../consul-ui/app/components/card/index.scss | 2 +- .../consul-ui/app/components/card/layout.scss | 2 +- .../consul-ui/app/components/card/skin.scss | 2 +- .../app/components/checkbox-group/index.scss | 2 +- .../app/components/checkbox-group/layout.scss | 2 +- .../app/components/checkbox-group/skin.scss | 2 +- .../app/components/child-selector/index.hbs | 2 +- .../app/components/child-selector/index.js | 2 +- .../app/components/code-editor/README.mdx | 2 +- .../app/components/code-editor/index.hbs | 4 +- .../app/components/code-editor/index.js | 34 +- .../app/components/code-editor/index.scss | 2 +- .../app/components/code-editor/layout.scss | 2 +- .../app/components/code-editor/skin.scss | 2 +- .../app/components/composite-row/index.scss | 2 +- .../app/components/composite-row/layout.scss | 2 +- .../components/confirmation-alert/index.hbs | 2 +- .../components/confirmation-alert/index.js | 2 +- .../components/confirmation-dialog/index.hbs | 2 +- .../components/confirmation-dialog/index.js | 2 +- .../components/confirmation-dialog/index.scss | 2 +- .../confirmation-dialog/layout.scss | 2 +- .../components/confirmation-dialog/skin.scss | 2 +- .../components/consul/acl/disabled/index.hbs | 2 +- .../consul/auth-method/binding-list/index.hbs | 2 +- .../components/consul/auth-method/index.scss | 2 +- .../consul/auth-method/list/index.hbs | 2 +- .../consul/auth-method/list/pageobject.js | 2 +- .../consul/auth-method/nspace-list/index.hbs | 2 +- .../consul/auth-method/search-bar/index.hbs | 2 +- .../consul/auth-method/type/index.hbs | 2 +- .../consul/auth-method/view/index.hbs | 2 +- .../components/consul/bucket/list/index.hbs | 2 +- .../components/consul/bucket/list/index.js | 2 +- .../components/consul/bucket/list/index.scss | 2 +- .../consul/datacenter/selector/index.hbs | 109 +- .../consul/datacenter/selector/index.js | 22 - .../consul/discovery-chain/index.hbs | 2 +- .../consul/discovery-chain/index.js | 2 +- .../consul/discovery-chain/index.scss | 2 +- .../consul/discovery-chain/layout.scss | 2 +- .../discovery-chain/resolver-card/index.hbs | 2 +- .../discovery-chain/route-card/index.hbs | 2 +- .../discovery-chain/route-card/index.js | 2 +- .../consul/discovery-chain/skin.scss | 2 +- .../discovery-chain/splitter-card/index.hbs | 2 +- .../consul/discovery-chain/utils.js | 2 +- .../consul/exposed-path/list/index.hbs | 4 +- .../consul/exposed-path/list/index.scss | 2 +- .../consul/external-source/index.hbs | 2 +- .../consul/external-source/index.scss | 2 +- .../consul/health-check/list/index.hbs | 4 +- .../consul/health-check/list/index.scss | 2 +- .../consul/health-check/list/layout.scss | 2 +- .../consul/health-check/list/pageobject.js | 2 +- 
.../consul/health-check/list/skin.scss | 2 +- .../consul/health-check/search-bar/index.hbs | 2 +- .../consul/instance-checks/index.hbs | 2 +- .../consul/instance-checks/index.scss | 2 +- .../consul/intention/components.scss | 2 +- .../consul/intention/form/fieldsets/index.hbs | 2 +- .../consul/intention/form/fieldsets/index.js | 2 +- .../intention/form/fieldsets/index.scss | 2 +- .../intention/form/fieldsets/layout.scss | 2 +- .../consul/intention/form/fieldsets/skin.scss | 2 +- .../consul/intention/form/index.hbs | 2 +- .../components/consul/intention/form/index.js | 2 +- .../consul/intention/form/index.scss | 2 +- .../components/consul/intention/index.scss | 2 +- .../consul/intention/list/check/index.hbs | 2 +- .../consul/intention/list/components.scss | 2 +- .../consul/intention/list/index.hbs | 2 +- .../components/consul/intention/list/index.js | 2 +- .../consul/intention/list/index.scss | 2 +- .../consul/intention/list/layout.scss | 2 +- .../consul/intention/list/pageobject.js | 2 +- .../consul/intention/list/skin.scss | 2 +- .../consul/intention/list/table/index.hbs | 2 +- .../consul/intention/list/table/index.scss | 2 +- .../notice/custom-resource/index.hbs | 2 +- .../intention/notice/permissions/index.hbs | 2 +- .../consul/intention/notifications/index.hbs | 2 +- .../intention/permission/form/index.hbs | 2 +- .../consul/intention/permission/form/index.js | 2 +- .../intention/permission/form/index.scss | 2 +- .../intention/permission/form/layout.scss | 2 +- .../intention/permission/form/pageobject.js | 2 +- .../intention/permission/form/skin.scss | 2 +- .../permission/header/form/index.hbs | 2 +- .../intention/permission/header/form/index.js | 2 +- .../permission/header/form/pageobject.js | 2 +- .../permission/header/list/index.hbs | 2 +- .../intention/permission/header/list/index.js | 2 +- .../permission/header/list/index.scss | 2 +- .../permission/header/list/layout.scss | 2 +- .../permission/header/list/pageobject.js | 2 +- .../permission/header/list/skin.scss | 2 +- .../intention/permission/list/index.hbs | 2 +- .../consul/intention/permission/list/index.js | 2 +- .../intention/permission/list/index.scss | 2 +- .../intention/permission/list/layout.scss | 2 +- .../intention/permission/list/pageobject.js | 2 +- .../intention/permission/list/skin.scss | 2 +- .../consul/intention/search-bar/index.hbs | 2 +- .../consul/intention/search-bar/index.scss | 2 +- .../consul/intention/view/index.hbs | 2 +- .../components/consul/intention/view/index.js | 2 +- .../app/components/consul/kind/index.hbs | 2 +- .../app/components/consul/kind/index.js | 2 +- .../app/components/consul/kind/index.scss | 2 +- .../app/components/consul/kv/form/index.hbs | 2 +- .../app/components/consul/kv/form/index.js | 2 +- .../app/components/consul/kv/list/index.hbs | 2 +- .../components/consul/kv/list/pageobject.js | 2 +- .../components/consul/kv/search-bar/index.hbs | 2 +- .../app/components/consul/loader/index.hbs | 2 +- .../app/components/consul/loader/index.scss | 2 +- .../app/components/consul/loader/layout.scss | 2 +- .../app/components/consul/loader/skin.scss | 2 +- .../components/consul/metadata/list/index.hbs | 2 +- .../components/consul/metadata/list/index.js | 2 +- .../consul/node-identity/template/index.hbs | 2 +- .../consul/node/agentless-notice/index.hbs | 2 +- .../consul/node/agentless-notice/index.js | 2 +- .../consul/node/agentless-notice/index.scss | 2 +- .../app/components/consul/node/list/index.hbs | 4 +- .../consul/node/peer-info/index.hbs | 2 +- .../consul/node/peer-info/index.scss | 2 +- 
.../consul/node/search-bar/index.hbs | 260 +- .../app/components/consul/peer/info/index.hbs | 2 +- .../components/consul/peer/info/index.scss | 2 +- .../components/consul/peer/list/index.scss | 2 +- .../components/consul/policy/list/index.hbs | 2 +- .../consul/policy/list/pageobject.js | 2 +- .../consul/policy/notifications/index.hbs | 2 +- .../consul/policy/search-bar/index.hbs | 2 +- .../consul/policy/search-bar/index.js | 2 +- .../app/components/consul/role/list/index.hbs | 2 +- .../components/consul/role/list/pageobject.js | 2 +- .../consul/role/notifications/index.hbs | 2 +- .../consul/role/search-bar/index.hbs | 2 +- .../components/consul/server/card/index.hbs | 2 +- .../components/consul/server/card/index.scss | 2 +- .../components/consul/server/card/layout.scss | 2 +- .../components/consul/server/card/skin.scss | 2 +- .../components/consul/server/list/index.hbs | 2 +- .../components/consul/server/list/index.scss | 2 +- .../service-identity/template/index.hbs | 2 +- .../consul/service-instance/list/index.hbs | 2 +- .../consul/service-instance/list/index.js | 2 +- .../service-instance/search-bar/index.hbs | 2 +- .../components/consul/service/list/index.hbs | 84 +- .../consul/service/list/item/index.hbs | 82 - .../consul/service/list/item/index.js | 25 - .../consul/service/search-bar/index.hbs | 2 +- .../consul/service/search-bar/index.js | 2 +- .../app/components/consul/source/index.hbs | 2 +- .../app/components/consul/source/index.scss | 2 +- .../consul/sources-select/index.hbs | 2 +- .../components/consul/token/list/index.hbs | 6 +- .../consul/token/list/pageobject.js | 2 +- .../consul/token/notifications/index.hbs | 2 +- .../consul/token/ruleset/list/index.hbs | 2 +- .../consul/token/ruleset/list/index.js | 2 +- .../consul/token/search-bar/index.hbs | 2 +- .../consul/tomography/graph/index.hbs | 2 +- .../consul/tomography/graph/index.js | 2 +- .../consul/tomography/graph/index.scss | 2 +- .../consul/transparent-proxy/index.hbs | 2 +- .../consul/upstream-instance/list/index.hbs | 6 +- .../consul/upstream-instance/list/index.scss | 2 +- .../upstream-instance/list/pageobject.js | 2 +- .../upstream-instance/search-bar/index.hbs | 2 +- .../components/consul/upstream/list/index.hbs | 4 +- .../consul/upstream/list/index.scss | 2 +- .../consul/upstream/search-bar/index.hbs | 2 +- .../README.mdx | 8 +- .../chart.xstate.js | 2 +- .../index.hbs | 6 +- .../index.js | 4 +- .../index.scss | 2 +- .../layout.scss | 2 +- .../skin.scss | 2 +- .../app/components/copyable-code/index.hbs | 6 +- .../app/components/copyable-code/index.scss | 2 +- .../app/components/csv-list/debug.scss | 2 +- .../app/components/csv-list/index.scss | 2 +- .../app/components/data-collection/index.hbs | 2 +- .../app/components/data-collection/index.js | 2 +- .../app/components/data-form/index.hbs | 2 +- .../app/components/data-form/index.js | 2 +- .../components/data-loader/chart.xstate.js | 2 +- .../app/components/data-loader/index.hbs | 2 +- .../app/components/data-loader/index.js | 2 +- .../app/components/data-sink/index.hbs | 2 +- .../app/components/data-sink/index.js | 2 +- .../app/components/data-source/index.hbs | 2 +- .../app/components/data-source/index.js | 2 +- .../components/data-writer/chart.xstate.js | 2 +- .../app/components/data-writer/index.hbs | 2 +- .../app/components/data-writer/index.js | 2 +- .../app/components/debug/navigation/index.hbs | 8 +- .../components/definition-table/debug.scss | 2 +- .../components/definition-table/index.scss | 2 +- .../components/definition-table/layout.scss | 2 +- 
.../app/components/definition-table/skin.scss | 2 +- .../components/delete-confirmation/index.hbs | 2 +- .../components/delete-confirmation/index.js | 2 +- .../disclosure-menu/action/index.hbs | 2 +- .../app/components/disclosure-menu/index.hbs | 2 +- .../app/components/disclosure-menu/index.scss | 2 +- .../components/disclosure-menu/menu/index.hbs | 20 +- .../components/disclosure/action/index.hbs | 2 +- .../components/disclosure/details/index.hbs | 2 +- .../app/components/disclosure/index.hbs | 2 +- .../app/components/disclosure/index.js | 2 +- .../app/components/display-toggle/index.scss | 2 +- .../app/components/display-toggle/layout.scss | 2 +- .../app/components/display-toggle/skin.scss | 2 +- .../components/dom-recycling-table/index.scss | 2 +- .../dom-recycling-table/layout.scss | 2 +- .../app/components/empty-state/index.hbs | 2 +- .../app/components/empty-state/index.js | 2 +- .../app/components/empty-state/index.scss | 2 +- .../app/components/empty-state/layout.scss | 2 +- .../app/components/empty-state/pageobject.js | 2 +- .../app/components/empty-state/skin.scss | 2 +- .../app/components/error-state/index.hbs | 2 +- .../app/components/event-source/index.hbs | 2 +- .../app/components/event-source/index.js | 2 +- .../expanded-single-select/index.scss | 2 +- .../expanded-single-select/layout.scss | 2 +- .../expanded-single-select/skin.scss | 2 +- .../app/components/filter-bar/index.scss | 2 +- .../app/components/filter-bar/layout.scss | 2 +- .../app/components/filter-bar/skin.scss | 2 +- .../app/components/form-component/index.hbs | 2 +- .../app/components/form-component/index.js | 2 +- .../app/components/form-elements/index.scss | 2 +- .../app/components/form-elements/layout.scss | 2 +- .../app/components/form-elements/skin.scss | 2 +- .../form-group/element/checkbox/index.hbs | 2 +- .../form-group/element/error/index.hbs | 2 +- .../components/form-group/element/index.hbs | 2 +- .../components/form-group/element/index.js | 2 +- .../form-group/element/label/index.hbs | 2 +- .../form-group/element/radio/index.hbs | 2 +- .../form-group/element/text/index.hbs | 2 +- .../app/components/form-group/index.hbs | 2 +- .../app/components/form-group/index.js | 2 +- .../app/components/form-input/index.hbs | 2 +- .../app/components/freetext-filter/index.hbs | 2 +- .../app/components/freetext-filter/index.js | 2 +- .../app/components/freetext-filter/index.scss | 2 +- .../components/freetext-filter/layout.scss | 2 +- .../components/freetext-filter/pageobject.js | 2 +- .../app/components/freetext-filter/skin.scss | 2 +- .../app/components/hashicorp-consul/index.hbs | 290 +- .../app/components/hashicorp-consul/index.js | 4 +- .../components/hashicorp-consul/index.scss | 122 +- .../components/hashicorp-consul/pageobject.js | 9 +- .../app/components/hcp-nav-item/index.hbs | 34 - .../app/components/hcp-nav-item/index.js | 57 - .../components/horizontal-kv-list/README.mdx | 8 +- .../components/horizontal-kv-list/debug.scss | 2 +- .../components/horizontal-kv-list/index.scss | 2 +- .../components/horizontal-kv-list/layout.scss | 2 +- .../components/horizontal-kv-list/skin.scss | 2 +- .../app/components/icon-definition/debug.scss | 2 +- .../app/components/icon-definition/index.scss | 2 +- .../app/components/informed-action/index.hbs | 2 +- .../app/components/informed-action/index.scss | 2 +- .../components/informed-action/layout.scss | 2 +- .../app/components/informed-action/skin.scss | 2 +- .../app/components/inline-alert/debug.scss | 2 +- .../app/components/inline-alert/index.scss | 2 +- 
.../app/components/inline-alert/layout.scss | 2 +- .../app/components/inline-alert/skin.scss | 2 +- .../app/components/inline-code/index.scss | 2 +- .../app/components/inline-code/layout.scss | 2 +- .../app/components/inline-code/skin.scss | 2 +- .../app/components/jwt-source/index.js | 2 +- .../components/link-to-hcp-banner/index.hbs | 24 - .../components/link-to-hcp-banner/index.js | 27 - .../components/link-to-hcp-modal/index.hbs | 112 - .../app/components/link-to-hcp-modal/index.js | 66 - .../components/link-to-hcp-modal/index.scss | 33 - .../app/components/list-collection/index.hbs | 2 +- .../app/components/list-collection/index.js | 2 +- .../app/components/list-collection/index.scss | 2 +- .../components/list-collection/layout.scss | 2 +- .../app/components/list-collection/skin.scss | 2 +- .../app/components/list-row/index.scss | 2 +- .../app/components/list-row/layout.scss | 2 +- .../app/components/list-row/skin.scss | 2 +- .../main-header-horizontal/index.scss | 7 + .../main-header-horizontal/layout.scss | 32 + .../main-header-horizontal/skin.scss | 8 + .../components/main-nav-horizontal/index.scss | 36 + .../main-nav-horizontal/layout.scss | 48 + .../components/main-nav-horizontal/skin.scss | 45 + .../components/main-nav-vertical/README.mdx | 71 + .../components/main-nav-vertical/debug.scss | 34 + .../components/main-nav-vertical/index.scss | 45 + .../components/main-nav-vertical/layout.scss | 68 + .../components/main-nav-vertical/skin.scss | 114 + .../app/components/menu-panel/deprecated.scss | 2 +- .../app/components/menu-panel/index.hbs | 2 +- .../app/components/menu-panel/index.js | 2 +- .../app/components/menu-panel/index.scss | 2 +- .../app/components/menu-panel/layout.scss | 2 +- .../app/components/menu-panel/skin.scss | 2 +- .../app/components/menu/action/index.hbs | 2 +- .../consul-ui/app/components/menu/index.hbs | 27 +- .../app/components/menu/item/index.hbs | 2 +- .../app/components/menu/separator/index.hbs | 2 +- .../app/components/modal-dialog/index.hbs | 2 +- .../app/components/modal-dialog/index.js | 2 +- .../app/components/modal-dialog/index.scss | 2 +- .../app/components/modal-dialog/layout.scss | 2 +- .../app/components/modal-dialog/skin.scss | 2 +- .../app/components/modal-layer/index.hbs | 2 +- .../components/more-popover-menu/index.hbs | 2 +- .../app/components/more-popover-menu/index.js | 2 +- .../components/more-popover-menu/index.scss | 2 +- .../more-popover-menu/pageobject.js | 2 +- .../app/components/nav-selector/generic.hbs | 6 - .../app/components/nav-selector/index.hbs | 62 - .../app/components/nav-selector/index.js | 29 - .../components/oidc-select/chart.xstate.js | 2 +- .../app/components/oidc-select/index.hbs | 2 +- .../app/components/oidc-select/index.js | 2 +- .../app/components/oidc-select/index.scss | 2 +- .../app/components/oidc-select/layout.scss | 2 +- .../app/components/oidc-select/skin.scss | 2 +- .../app/components/option-input/index.hbs | 2 +- .../consul-ui/app/components/outlet/index.hbs | 2 +- .../consul-ui/app/components/outlet/index.js | 2 +- .../app/components/overlay/index.scss | 2 +- .../app/components/overlay/none.scss | 2 +- .../app/components/overlay/square-tail.scss | 2 +- .../app/components/paged-collection/index.hbs | 2 +- .../app/components/paged-collection/index.js | 2 +- .../components/paged-collection/index.scss | 2 +- .../consul-ui/app/components/panel/debug.scss | 2 +- .../app/components/panel/index.css.js | 2 +- .../consul-ui/app/components/panel/index.scss | 2 +- .../app/components/panel/layout.scss | 2 +- 
.../consul-ui/app/components/panel/skin.scss | 2 +- .../components/peerings/badge/icon/index.hbs | 2 +- .../app/components/peerings/badge/index.hbs | 2 +- .../app/components/peerings/badge/index.js | 2 +- .../app/components/peerings/badge/index.scss | 2 +- .../components/peerings/provider/index.hbs | 2 +- .../app/components/peerings/provider/index.js | 2 +- .../consul-ui/app/components/pill/index.scss | 2 +- .../consul-ui/app/components/pill/layout.scss | 2 +- .../consul-ui/app/components/pill/skin.scss | 2 +- .../app/components/policy-form/index.hbs | 2 +- .../app/components/policy-form/index.js | 2 +- .../app/components/policy-form/pageobject.js | 2 +- .../app/components/policy-selector/index.hbs | 2 +- .../app/components/policy-selector/index.js | 2 +- .../components/policy-selector/pageobject.js | 2 +- .../app/components/popover-menu/index.hbs | 2 +- .../app/components/popover-menu/index.js | 2 +- .../app/components/popover-menu/index.scss | 2 +- .../app/components/popover-menu/layout.scss | 2 +- .../popover-menu/menu-item/index.hbs | 2 +- .../popover-menu/menu-item/index.js | 2 +- .../popover-menu/menu-separator/index.hbs | 2 +- .../popover-menu/menu-separator/index.js | 2 +- .../app/components/popover-menu/skin.scss | 2 +- .../app/components/popover-select/index.hbs | 2 +- .../app/components/popover-select/index.js | 2 +- .../app/components/popover-select/index.scss | 2 +- .../popover-select/optgroup/index.hbs | 2 +- .../popover-select/option/index.hbs | 2 +- .../components/popover-select/option/index.js | 2 +- .../components/popover-select/pageobject.js | 2 +- .../app/components/power-select/pageobject.js | 2 +- .../app/components/progress/index.hbs | 2 +- .../app/components/progress/index.scss | 2 +- .../app/components/progress/layout.scss | 2 +- .../app/components/progress/skin.scss | 2 +- .../components/providers/dimension/index.hbs | 2 +- .../components/providers/dimension/index.js | 2 +- .../app/components/providers/search/index.hbs | 2 +- .../app/components/providers/search/index.js | 2 +- .../app/components/radio-card/index.hbs | 2 +- .../app/components/radio-card/index.js | 2 +- .../app/components/radio-card/index.scss | 2 +- .../app/components/radio-card/layout.scss | 2 +- .../app/components/radio-card/skin.scss | 2 +- .../app/components/radio-group/index.hbs | 2 +- .../app/components/radio-group/index.js | 2 +- .../app/components/radio-group/index.scss | 2 +- .../app/components/radio-group/layout.scss | 2 +- .../app/components/radio-group/pageobject.js | 2 +- .../app/components/radio-group/skin.scss | 2 +- .../consul-ui/app/components/ref/index.js | 2 +- .../app/components/role-form/index.hbs | 2 +- .../app/components/role-form/index.js | 2 +- .../app/components/role-form/pageobject.js | 2 +- .../app/components/role-selector/index.hbs | 2 +- .../app/components/role-selector/index.js | 2 +- .../app/components/role-selector/index.scss | 2 +- .../components/role-selector/pageobject.js | 2 +- .../app/components/route/announcer/index.hbs | 2 +- .../consul-ui/app/components/route/index.hbs | 2 +- .../consul-ui/app/components/route/index.js | 2 +- .../app/components/route/title/index.hbs | 2 +- .../app/components/route/title/index.scss | 2 +- .../app/components/search-bar/index.hbs | 2 +- .../app/components/search-bar/index.js | 2 +- .../app/components/search-bar/index.scss | 2 +- .../search-bar/remove-filter/index.hbs | 2 +- .../app/components/search-bar/utils.js | 2 +- .../app/components/skip-links/index.scss | 7 + .../app/components/skip-links/layout.scss | 26 + 
.../app/components/skip-links/skin.scss | 14 + .../app/components/sliding-toggle/index.scss | 2 +- .../app/components/sliding-toggle/layout.scss | 2 +- .../app/components/sliding-toggle/skin.scss | 2 +- .../components/state-chart/action/index.hbs | 2 +- .../components/state-chart/action/index.js | 2 +- .../components/state-chart/guard/index.hbs | 2 +- .../app/components/state-chart/guard/index.js | 2 +- .../app/components/state-chart/index.hbs | 2 +- .../app/components/state-chart/index.js | 2 +- .../app/components/state-machine/index.hbs | 2 +- .../app/components/state-machine/index.js | 2 +- .../consul-ui/app/components/state/index.hbs | 2 +- .../consul-ui/app/components/state/index.js | 2 +- .../app/components/tab-nav/index.hbs | 2 +- .../consul-ui/app/components/tab-nav/index.js | 2 +- .../app/components/tab-nav/index.scss | 2 +- .../app/components/tab-nav/layout.scss | 2 +- .../app/components/tab-nav/pageobject.js | 2 +- .../app/components/tab-nav/skin.scss | 2 +- .../consul-ui/app/components/table/index.scss | 2 +- .../app/components/table/layout.scss | 2 +- .../consul-ui/app/components/table/skin.scss | 2 +- .../components/tabular-collection/index.hbs | 2 +- .../components/tabular-collection/index.js | 5 +- .../components/tabular-collection/index.scss | 2 +- .../app/components/tabular-details/index.hbs | 2 +- .../app/components/tabular-details/index.js | 2 +- .../app/components/tabular-details/index.scss | 2 +- .../components/tabular-details/layout.scss | 2 +- .../app/components/tabular-details/skin.scss | 2 +- .../app/components/tabular-dl/index.scss | 2 +- .../app/components/tabular-dl/layout.scss | 2 +- .../app/components/tabular-dl/skin.scss | 2 +- .../app/components/tag-list/index.hbs | 2 +- .../app/components/tag-list/index.scss | 2 +- .../app/components/text-input/index.hbs | 2 +- .../consul-ui/app/components/tile/debug.scss | 2 +- .../consul-ui/app/components/tile/index.scss | 2 +- .../app/components/toggle-button/index.hbs | 2 +- .../app/components/toggle-button/index.js | 2 +- .../app/components/toggle-button/index.scss | 2 +- .../app/components/toggle-button/layout.scss | 2 +- .../app/components/toggle-button/skin.scss | 2 +- .../app/components/token-list/index.hbs | 2 +- .../app/components/token-list/index.js | 2 +- .../app/components/token-list/pageobject.js | 2 +- .../components/token-source/chart.xstate.js | 2 +- .../app/components/token-source/index.hbs | 2 +- .../app/components/token-source/index.js | 2 +- .../app/components/tooltip-panel/index.scss | 2 +- .../app/components/tooltip-panel/layout.scss | 2 +- .../app/components/tooltip-panel/skin.scss | 2 +- .../app/components/tooltip/index.hbs | 2 +- .../app/components/tooltip/index.scss | 2 +- .../topology-metrics/card/index.hbs | 2 +- .../components/topology-metrics/card/index.js | 2 +- .../topology-metrics/card/index.scss | 2 +- .../topology-metrics/down-lines/index.hbs | 2 +- .../topology-metrics/down-lines/index.js | 2 +- .../app/components/topology-metrics/index.hbs | 2 +- .../app/components/topology-metrics/index.js | 2 +- .../components/topology-metrics/index.scss | 2 +- .../components/topology-metrics/layout.scss | 2 +- .../topology-metrics/notifications/index.hbs | 2 +- .../topology-metrics/popover/index.hbs | 2 +- .../topology-metrics/popover/index.js | 2 +- .../topology-metrics/popover/index.scss | 2 +- .../topology-metrics/series/index.hbs | 2 +- .../topology-metrics/series/index.js | 2 +- .../topology-metrics/series/index.scss | 2 +- .../topology-metrics/series/layout.scss | 2 +- 
.../topology-metrics/series/skin.scss | 2 +- .../app/components/topology-metrics/skin.scss | 2 +- .../topology-metrics/source-type/index.hbs | 2 +- .../topology-metrics/source-type/index.scss | 2 +- .../topology-metrics/stats/index.hbs | 2 +- .../topology-metrics/stats/index.js | 2 +- .../topology-metrics/stats/index.scss | 2 +- .../topology-metrics/status/index.hbs | 2 +- .../topology-metrics/status/index.scss | 2 +- .../topology-metrics/up-lines/index.hbs | 2 +- .../topology-metrics/up-lines/index.js | 2 +- .../app/components/watcher/index.hbs | 2 +- .../consul-ui/app/components/watcher/index.js | 2 +- .../consul-ui/app/components/yield/index.hbs | 2 +- .../app/controllers/_peered-resource.js | 2 +- .../consul-ui/app/controllers/application.js | 3 +- .../controllers/dc/acls/policies/create.js | 2 +- .../app/controllers/dc/acls/policies/edit.js | 2 +- .../app/controllers/dc/acls/roles/create.js | 2 +- .../app/controllers/dc/acls/roles/edit.js | 2 +- .../app/controllers/dc/acls/tokens/create.js | 2 +- .../app/controllers/dc/acls/tokens/edit.js | 2 +- .../app/controllers/dc/nodes/index.js | 2 +- .../app/controllers/dc/services/index.js | 2 +- .../dc/services/instance/healthchecks.js | 2 +- .../consul-ui/app/decorators/data-source.js | 2 +- .../consul-ui/app/decorators/replace.js | 2 +- ui/packages/consul-ui/app/env.js | 2 +- .../app/filter/predicates/auth-method.js | 2 +- .../app/filter/predicates/health-check.js | 2 +- .../app/filter/predicates/intention.js | 2 +- .../consul-ui/app/filter/predicates/kv.js | 2 +- .../consul-ui/app/filter/predicates/node.js | 11 +- .../consul-ui/app/filter/predicates/peer.js | 2 +- .../consul-ui/app/filter/predicates/policy.js | 2 +- .../app/filter/predicates/service-instance.js | 2 +- .../app/filter/predicates/service.js | 2 +- .../consul-ui/app/filter/predicates/token.js | 2 +- ui/packages/consul-ui/app/formats.js | 2 +- ui/packages/consul-ui/app/forms/intention.js | 2 +- ui/packages/consul-ui/app/forms/kv.js | 2 +- ui/packages/consul-ui/app/forms/policy.js | 2 +- ui/packages/consul-ui/app/forms/role.js | 2 +- ui/packages/consul-ui/app/forms/token.js | 2 +- .../consul-ui/app/helpers/adopt-styles.js | 2 +- ui/packages/consul-ui/app/helpers/atob.js | 2 +- .../consul-ui/app/helpers/cached-model.js | 2 +- .../consul-ui/app/helpers/class-map.js | 2 +- .../consul-ui/app/helpers/collection.js | 2 +- ui/packages/consul-ui/app/helpers/css-map.js | 2 +- ui/packages/consul-ui/app/helpers/css.js | 2 +- .../consul-ui/app/helpers/document-attrs.js | 2 +- .../consul-ui/app/helpers/dom-position.js | 2 +- .../consul-ui/app/helpers/duration-from.js | 2 +- ui/packages/consul-ui/app/helpers/env.js | 2 +- .../consul-ui/app/helpers/flatten-property.js | 2 +- .../app/helpers/format-short-time.js | 2 +- .../app/helpers/hcp-authentication-link.js | 34 - .../app/helpers/hcp-resource-id-to-link.js | 32 - ui/packages/consul-ui/app/helpers/href-to.js | 2 +- .../consul-ui/app/helpers/icon-mapping.js | 2 +- .../consul-ui/app/helpers/icons-debug.js | 2 +- ui/packages/consul-ui/app/helpers/is-href.js | 2 +- ui/packages/consul-ui/app/helpers/is.js | 2 +- .../consul-ui/app/helpers/json-stringify.js | 2 +- ui/packages/consul-ui/app/helpers/last.js | 2 +- .../consul-ui/app/helpers/left-trim.js | 2 +- .../consul-ui/app/helpers/merge-checks.js | 2 +- .../consul-ui/app/helpers/percentage-of.js | 2 +- .../app/helpers/policy/datacenters.js | 2 +- .../consul-ui/app/helpers/policy/group.js | 2 +- .../consul-ui/app/helpers/policy/typeof.js | 2 +- .../consul-ui/app/helpers/refresh-route.js | 2 +- 
.../consul-ui/app/helpers/render-template.js | 2 +- ui/packages/consul-ui/app/helpers/require.js | 2 +- .../consul-ui/app/helpers/right-trim.js | 2 +- .../consul-ui/app/helpers/route-match.js | 2 +- .../app/helpers/service/card-permissions.js | 2 +- .../app/helpers/service/external-source.js | 2 +- .../app/helpers/service/health-percentage.js | 2 +- ui/packages/consul-ui/app/helpers/slugify.js | 2 +- .../app/helpers/smart-date-format.js | 2 +- ui/packages/consul-ui/app/helpers/split.js | 2 +- .../consul-ui/app/helpers/state-chart.js | 2 +- .../consul-ui/app/helpers/state-matches.js | 2 +- .../consul-ui/app/helpers/style-map.js | 2 +- ui/packages/consul-ui/app/helpers/substr.js | 2 +- .../consul-ui/app/helpers/svg-curve.js | 2 +- .../consul-ui/app/helpers/temporal-format.js | 2 +- .../consul-ui/app/helpers/temporal-within.js | 2 +- ui/packages/consul-ui/app/helpers/test.js | 2 +- ui/packages/consul-ui/app/helpers/to-hash.js | 2 +- ui/packages/consul-ui/app/helpers/to-route.js | 2 +- .../app/helpers/token/is-anonymous.js | 2 +- .../consul-ui/app/helpers/token/is-legacy.js | 2 +- ui/packages/consul-ui/app/helpers/tween-to.js | 2 +- ui/packages/consul-ui/app/helpers/uniq-by.js | 2 +- .../consul-ui/app/helpers/unique-id.js | 2 +- ui/packages/consul-ui/app/helpers/uri.js | 2 +- ui/packages/consul-ui/app/index.html | 2 +- .../app/instance-initializers/container.js | 2 +- .../app/instance-initializers/href-to.js | 2 +- .../instance-initializers/ivy-codemirror.js | 2 +- .../app/instance-initializers/selection.js | 2 +- .../app/locations/fsm-with-optional-test.js | 2 +- .../app/locations/fsm-with-optional.js | 11 +- ui/packages/consul-ui/app/locations/fsm.js | 2 +- .../consul-ui/app/machines/boolean.xstate.js | 2 +- .../consul-ui/app/machines/validate.xstate.js | 2 +- .../consul-ui/app/mixins/policy/as-many.js | 2 +- .../consul-ui/app/mixins/role/as-many.js | 2 +- .../app/mixins/with-blocking-actions.js | 2 +- .../consul-ui/app/models/auth-method.js | 2 +- .../consul-ui/app/models/binding-rule.js | 2 +- .../consul-ui/app/models/coordinate.js | 2 +- ui/packages/consul-ui/app/models/dc.js | 2 +- .../consul-ui/app/models/discovery-chain.js | 2 +- .../consul-ui/app/models/gateway-config.js | 2 +- .../consul-ui/app/models/health-check.js | 2 +- .../intention-permission-http-header.js | 2 +- .../app/models/intention-permission-http.js | 2 +- .../app/models/intention-permission.js | 2 +- ui/packages/consul-ui/app/models/intention.js | 2 +- ui/packages/consul-ui/app/models/kv.js | 2 +- ui/packages/consul-ui/app/models/license.js | 2 +- ui/packages/consul-ui/app/models/node.js | 2 +- ui/packages/consul-ui/app/models/nspace.js | 2 +- .../consul-ui/app/models/oidc-provider.js | 2 +- ui/packages/consul-ui/app/models/partition.js | 2 +- ui/packages/consul-ui/app/models/peer.js | 2 +- .../consul-ui/app/models/permission.js | 2 +- ui/packages/consul-ui/app/models/policy.js | 2 +- ui/packages/consul-ui/app/models/proxy.js | 2 +- ui/packages/consul-ui/app/models/role.js | 2 +- .../consul-ui/app/models/service-instance.js | 2 +- ui/packages/consul-ui/app/models/service.js | 2 +- ui/packages/consul-ui/app/models/session.js | 2 +- ui/packages/consul-ui/app/models/token.js | 2 +- ui/packages/consul-ui/app/models/topology.js | 2 +- .../consul-ui/app/modifiers/aria-menu.js | 48 +- .../consul-ui/app/modifiers/css-prop.js | 11 +- .../consul-ui/app/modifiers/css-props.js | 2 +- .../consul-ui/app/modifiers/did-upsert.js | 48 +- .../consul-ui/app/modifiers/disabled.js | 2 +- .../consul-ui/app/modifiers/notification.js | 31 +- 
.../consul-ui/app/modifiers/on-outside.js | 45 +- ui/packages/consul-ui/app/modifiers/style.js | 16 +- .../consul-ui/app/modifiers/tooltip.js | 2 +- .../consul-ui/app/modifiers/validate.js | 59 +- .../consul-ui/app/modifiers/with-copyable.js | 34 +- .../consul-ui/app/modifiers/with-overlay.js | 2 +- ui/packages/consul-ui/app/router.js | 2 +- .../consul-ui/app/routes/application.js | 10 +- ui/packages/consul-ui/app/routes/dc.js | 2 +- .../app/routes/dc/acls/auth-methods/index.js | 2 +- .../routes/dc/acls/auth-methods/show/index.js | 2 +- .../app/routes/dc/acls/policies/create.js | 2 +- .../app/routes/dc/acls/policies/edit.js | 2 +- .../app/routes/dc/acls/policies/index.js | 2 +- .../app/routes/dc/acls/roles/create.js | 2 +- .../app/routes/dc/acls/roles/edit.js | 2 +- .../app/routes/dc/acls/roles/index.js | 2 +- .../app/routes/dc/acls/tokens/create.js | 2 +- .../app/routes/dc/acls/tokens/edit.js | 2 +- .../app/routes/dc/acls/tokens/index.js | 2 +- .../consul-ui/app/routes/dc/kv/folder.js | 2 +- .../consul-ui/app/routes/dc/kv/index.js | 2 +- .../app/routes/dc/services/notfound.js | 2 +- .../app/routes/dc/services/show/topology.js | 2 +- .../consul-ui/app/routes/unavailable.js | 18 - .../app/routing/application-debug.js | 2 +- ui/packages/consul-ui/app/routing/route.js | 2 +- ui/packages/consul-ui/app/routing/single.js | 2 +- .../consul-ui/app/search/predicates/acl.js | 2 +- .../app/search/predicates/auth-method.js | 2 +- .../app/search/predicates/health-check.js | 2 +- .../app/search/predicates/intention.js | 2 +- .../consul-ui/app/search/predicates/kv.js | 2 +- .../consul-ui/app/search/predicates/node.js | 2 +- .../consul-ui/app/search/predicates/nspace.js | 2 +- .../consul-ui/app/search/predicates/peer.js | 2 +- .../consul-ui/app/search/predicates/policy.js | 2 +- .../consul-ui/app/search/predicates/role.js | 2 +- .../app/search/predicates/service-instance.js | 2 +- .../app/search/predicates/service.js | 2 +- .../consul-ui/app/search/predicates/token.js | 2 +- .../search/predicates/upstream-instance.js | 2 +- .../consul-ui/app/serializers/application.js | 2 +- .../consul-ui/app/serializers/auth-method.js | 2 +- .../consul-ui/app/serializers/binding-rule.js | 2 +- .../consul-ui/app/serializers/coordinate.js | 2 +- .../app/serializers/discovery-chain.js | 2 +- ui/packages/consul-ui/app/serializers/http.js | 2 +- .../consul-ui/app/serializers/intention.js | 2 +- ui/packages/consul-ui/app/serializers/kv.js | 2 +- ui/packages/consul-ui/app/serializers/node.js | 2 +- .../consul-ui/app/serializers/nspace.js | 2 +- .../app/serializers/oidc-provider.js | 2 +- .../consul-ui/app/serializers/partition.js | 2 +- .../consul-ui/app/serializers/permission.js | 2 +- .../consul-ui/app/serializers/policy.js | 2 +- .../consul-ui/app/serializers/proxy.js | 2 +- ui/packages/consul-ui/app/serializers/role.js | 2 +- .../app/serializers/service-instance.js | 2 +- .../consul-ui/app/serializers/service.js | 2 +- .../consul-ui/app/serializers/session.js | 2 +- .../consul-ui/app/serializers/token.js | 2 +- .../consul-ui/app/serializers/topology.js | 2 +- .../consul-ui/app/services/abilities.js | 2 +- ui/packages/consul-ui/app/services/atob.js | 2 +- .../oauth2-code-with-url-provider.js | 2 +- ui/packages/consul-ui/app/services/btoa.js | 2 +- ui/packages/consul-ui/app/services/change.js | 2 +- .../app/services/client/connections.js | 2 +- .../consul-ui/app/services/client/http.js | 2 +- .../app/services/client/transports/xhr.js | 2 +- .../app/services/clipboard/local-storage.js | 2 +- 
.../consul-ui/app/services/clipboard/os.js | 2 +- .../app/services/code-mirror/linter.js | 2 +- .../consul-ui/app/services/container.js | 2 +- .../app/services/data-sink/protocols/http.js | 2 +- .../data-sink/protocols/local-storage.js | 2 +- .../app/services/data-sink/service.js | 2 +- .../services/data-source/protocols/http.js | 2 +- .../data-source/protocols/http/blocking.js | 2 +- .../data-source/protocols/http/promise.js | 2 +- .../data-source/protocols/local-storage.js | 2 +- .../app/services/data-source/service.js | 2 +- .../consul-ui/app/services/data-structs.js | 2 +- ui/packages/consul-ui/app/services/dom.js | 2 +- ui/packages/consul-ui/app/services/encoder.js | 2 +- ui/packages/consul-ui/app/services/env.js | 5 +- .../consul-ui/app/services/feedback.js | 2 +- ui/packages/consul-ui/app/services/filter.js | 2 +- ui/packages/consul-ui/app/services/form.js | 2 +- .../consul-ui/app/services/hcp-link-modal.js | 19 - .../consul-ui/app/services/hcp-link-status.js | 37 - ui/packages/consul-ui/app/services/hcp.js | 2 +- .../consul-ui/app/services/i18n-debug.js | 2 +- .../consul-ui/app/services/local-storage.js | 2 +- ui/packages/consul-ui/app/services/logger.js | 2 +- .../consul-ui/app/services/repository.js | 13 +- .../app/services/repository/auth-method.js | 2 +- .../app/services/repository/binding-rule.js | 2 +- .../app/services/repository/coordinate.js | 2 +- .../consul-ui/app/services/repository/dc.js | 2 +- .../services/repository/discovery-chain.js | 2 +- .../app/services/repository/hcp-link.js | 84 - .../intention-permission-http-header.js | 2 +- .../repository/intention-permission.js | 2 +- .../app/services/repository/intention.js | 2 +- .../consul-ui/app/services/repository/kv.js | 2 +- .../app/services/repository/license.js | 2 +- .../app/services/repository/metrics.js | 2 +- .../consul-ui/app/services/repository/node.js | 2 +- .../app/services/repository/nspace.js | 2 +- .../app/services/repository/oidc-provider.js | 2 +- .../app/services/repository/partition.js | 2 +- .../consul-ui/app/services/repository/peer.js | 2 +- .../app/services/repository/permission.js | 2 +- .../app/services/repository/policy.js | 2 +- .../app/services/repository/proxy.js | 2 +- .../consul-ui/app/services/repository/role.js | 2 +- .../services/repository/service-instance.js | 2 +- .../app/services/repository/service.js | 2 +- .../app/services/repository/session.js | 2 +- .../app/services/repository/token.js | 2 +- .../app/services/repository/topology.js | 2 +- ui/packages/consul-ui/app/services/routlet.js | 2 +- ui/packages/consul-ui/app/services/schema.js | 2 +- ui/packages/consul-ui/app/services/search.js | 2 +- .../consul-ui/app/services/settings.js | 2 +- ui/packages/consul-ui/app/services/sort.js | 2 +- .../app/services/state-with-charts.js | 2 +- ui/packages/consul-ui/app/services/state.js | 2 +- ui/packages/consul-ui/app/services/store.js | 2 +- .../consul-ui/app/services/temporal.js | 2 +- ui/packages/consul-ui/app/services/ticker.js | 2 +- ui/packages/consul-ui/app/services/timeout.js | 2 +- .../consul-ui/app/services/ui-config.js | 2 +- .../app/sort/comparators/auth-method.js | 2 +- .../app/sort/comparators/health-check.js | 2 +- .../app/sort/comparators/intention.js | 2 +- .../consul-ui/app/sort/comparators/kv.js | 2 +- .../consul-ui/app/sort/comparators/node.js | 2 +- .../consul-ui/app/sort/comparators/nspace.js | 2 +- .../app/sort/comparators/partition.js | 2 +- .../consul-ui/app/sort/comparators/peer.js | 2 +- .../consul-ui/app/sort/comparators/policy.js | 2 +- 
.../consul-ui/app/sort/comparators/role.js | 2 +- .../app/sort/comparators/service-instance.js | 2 +- .../consul-ui/app/sort/comparators/service.js | 2 +- .../consul-ui/app/sort/comparators/token.js | 2 +- .../app/sort/comparators/upstream-instance.js | 2 +- ui/packages/consul-ui/app/storages/base.js | 2 +- ui/packages/consul-ui/app/storages/notices.js | 2 +- ui/packages/consul-ui/app/styles/app.scss | 5 +- .../app/styles/base/animation/index.scss | 2 +- .../app/styles/base/color/index.scss | 4 +- .../styles/base/color/semantic-variables.scss | 2 +- .../base/color/ui/frame-placeholders.scss | 2 +- .../app/styles/base/color/ui/index.scss | 6 + .../app/styles/base/component/index.scss | 2 +- .../base/decoration/base-placeholders.scss | 2 +- .../base/decoration/base-variables.scss | 2 +- .../app/styles/base/decoration/index.scss | 2 +- .../base/decoration/visually-hidden.css.js | 2 +- .../styles/base/icons/base-keyframes.css.js | 2 +- .../app/styles/base/icons/base-keyframes.scss | 2 +- .../styles/base/icons/base-placeholders.scss | 2 +- .../app/styles/base/icons/debug.scss | 2 +- .../base/icons/icons/activity/index.scss | 2 +- .../base/icons/icons/activity/keyframes.scss | 2 +- .../icons/icons/activity/placeholders.scss | 2 +- .../icons/icons/activity/property-16.scss | 2 +- .../icons/icons/activity/property-24.scss | 2 +- .../icons/icons/alert-circle-fill/index.scss | 2 +- .../icons/alert-circle-fill/keyframes.scss | 2 +- .../icons/alert-circle-fill/placeholders.scss | 2 +- .../icons/alert-circle-fill/property-16.scss | 2 +- .../icons/alert-circle-fill/property-24.scss | 2 +- .../icons/alert-circle-outline/index.scss | 2 +- .../icons/alert-circle-outline/keyframes.scss | 2 +- .../alert-circle-outline/placeholders.scss | 2 +- .../base/icons/icons/alert-circle/index.scss | 2 +- .../icons/icons/alert-circle/keyframes.scss | 2 +- .../icons/alert-circle/placeholders.scss | 2 +- .../icons/icons/alert-circle/property-16.scss | 2 +- .../icons/icons/alert-circle/property-24.scss | 2 +- .../icons/icons/alert-octagon-fill/index.scss | 2 +- .../icons/alert-octagon-fill/keyframes.scss | 2 +- .../alert-octagon-fill/placeholders.scss | 2 +- .../icons/alert-octagon-fill/property-16.scss | 2 +- .../icons/alert-octagon-fill/property-24.scss | 2 +- .../base/icons/icons/alert-octagon/index.scss | 2 +- .../icons/icons/alert-octagon/keyframes.scss | 2 +- .../icons/alert-octagon/placeholders.scss | 2 +- .../icons/alert-octagon/property-16.scss | 2 +- .../icons/alert-octagon/property-24.scss | 2 +- .../icons/alert-triangle-fill/index.scss | 2 +- .../icons/alert-triangle-fill/keyframes.scss | 2 +- .../alert-triangle-fill/placeholders.scss | 2 +- .../alert-triangle-fill/property-16.scss | 2 +- .../alert-triangle-fill/property-24.scss | 2 +- .../icons/icons/alert-triangle/index.scss | 2 +- .../icons/icons/alert-triangle/keyframes.scss | 2 +- .../icons/alert-triangle/placeholders.scss | 2 +- .../icons/alert-triangle/property-16.scss | 2 +- .../icons/alert-triangle/property-24.scss | 2 +- .../base/icons/icons/alibaba-color/index.scss | 2 +- .../icons/icons/alibaba-color/keyframes.scss | 2 +- .../icons/alibaba-color/placeholders.scss | 2 +- .../icons/alibaba-color/property-16.scss | 2 +- .../icons/alibaba-color/property-24.scss | 2 +- .../base/icons/icons/alibaba/index.scss | 2 +- .../base/icons/icons/alibaba/keyframes.scss | 2 +- .../icons/icons/alibaba/placeholders.scss | 2 +- .../base/icons/icons/alibaba/property-16.scss | 2 +- .../base/icons/icons/alibaba/property-24.scss | 2 +- 
.../base/icons/icons/align-center/index.scss | 2 +- .../icons/icons/align-center/keyframes.scss | 2 +- .../icons/align-center/placeholders.scss | 2 +- .../icons/icons/align-center/property-16.scss | 2 +- .../icons/icons/align-center/property-24.scss | 2 +- .../base/icons/icons/align-justify/index.scss | 2 +- .../icons/icons/align-justify/keyframes.scss | 2 +- .../icons/align-justify/placeholders.scss | 2 +- .../icons/align-justify/property-16.scss | 2 +- .../icons/align-justify/property-24.scss | 2 +- .../base/icons/icons/align-left/index.scss | 2 +- .../icons/icons/align-left/keyframes.scss | 2 +- .../icons/icons/align-left/placeholders.scss | 2 +- .../icons/icons/align-left/property-16.scss | 2 +- .../icons/icons/align-left/property-24.scss | 2 +- .../base/icons/icons/align-right/index.scss | 2 +- .../icons/icons/align-right/keyframes.scss | 2 +- .../icons/icons/align-right/placeholders.scss | 2 +- .../icons/icons/align-right/property-16.scss | 2 +- .../icons/icons/align-right/property-24.scss | 2 +- .../icons/icons/amazon-eks-color/index.scss | 2 +- .../icons/amazon-eks-color/keyframes.scss | 2 +- .../icons/amazon-eks-color/placeholders.scss | 2 +- .../icons/amazon-eks-color/property-16.scss | 2 +- .../icons/amazon-eks-color/property-24.scss | 2 +- .../base/icons/icons/amazon-eks/index.scss | 2 +- .../icons/icons/amazon-eks/keyframes.scss | 2 +- .../icons/icons/amazon-eks/placeholders.scss | 2 +- .../icons/icons/amazon-eks/property-16.scss | 2 +- .../icons/icons/amazon-eks/property-24.scss | 2 +- .../base/icons/icons/apple-color/index.scss | 2 +- .../icons/icons/apple-color/keyframes.scss | 2 +- .../icons/icons/apple-color/placeholders.scss | 2 +- .../icons/icons/apple-color/property-16.scss | 2 +- .../icons/icons/apple-color/property-24.scss | 2 +- .../styles/base/icons/icons/apple/index.scss | 2 +- .../base/icons/icons/apple/keyframes.scss | 2 +- .../base/icons/icons/apple/placeholders.scss | 2 +- .../base/icons/icons/apple/property-16.scss | 2 +- .../base/icons/icons/apple/property-24.scss | 2 +- .../base/icons/icons/archive/index.scss | 2 +- .../base/icons/icons/archive/keyframes.scss | 2 +- .../icons/icons/archive/placeholders.scss | 2 +- .../base/icons/icons/archive/property-16.scss | 2 +- .../base/icons/icons/archive/property-24.scss | 2 +- .../icons/icons/arrow-down-circle/index.scss | 2 +- .../icons/arrow-down-circle/keyframes.scss | 2 +- .../icons/arrow-down-circle/placeholders.scss | 2 +- .../icons/arrow-down-circle/property-16.scss | 2 +- .../icons/arrow-down-circle/property-24.scss | 2 +- .../icons/icons/arrow-down-left/index.scss | 2 +- .../icons/arrow-down-left/keyframes.scss | 2 +- .../icons/arrow-down-left/placeholders.scss | 2 +- .../icons/arrow-down-left/property-16.scss | 2 +- .../icons/arrow-down-left/property-24.scss | 2 +- .../icons/icons/arrow-down-right/index.scss | 2 +- .../icons/arrow-down-right/keyframes.scss | 2 +- .../icons/arrow-down-right/placeholders.scss | 2 +- .../icons/arrow-down-right/property-16.scss | 2 +- .../icons/arrow-down-right/property-24.scss | 2 +- .../base/icons/icons/arrow-down/index.scss | 2 +- .../icons/icons/arrow-down/keyframes.scss | 2 +- .../icons/icons/arrow-down/placeholders.scss | 2 +- .../icons/icons/arrow-down/property-16.scss | 2 +- .../icons/icons/arrow-down/property-24.scss | 2 +- .../icons/icons/arrow-left-circle/index.scss | 2 +- .../icons/arrow-left-circle/keyframes.scss | 2 +- .../icons/arrow-left-circle/placeholders.scss | 2 +- .../icons/arrow-left-circle/property-16.scss | 2 +- 
.../icons/arrow-left-circle/property-24.scss | 2 +- .../base/icons/icons/arrow-left/index.scss | 2 +- .../icons/icons/arrow-left/keyframes.scss | 2 +- .../icons/icons/arrow-left/placeholders.scss | 2 +- .../icons/icons/arrow-left/property-16.scss | 2 +- .../icons/icons/arrow-left/property-24.scss | 2 +- .../icons/icons/arrow-right-circle/index.scss | 2 +- .../icons/arrow-right-circle/keyframes.scss | 2 +- .../arrow-right-circle/placeholders.scss | 2 +- .../icons/arrow-right-circle/property-16.scss | 2 +- .../icons/arrow-right-circle/property-24.scss | 2 +- .../base/icons/icons/arrow-right/index.scss | 2 +- .../icons/icons/arrow-right/keyframes.scss | 2 +- .../icons/icons/arrow-right/placeholders.scss | 2 +- .../icons/icons/arrow-right/property-16.scss | 2 +- .../icons/icons/arrow-right/property-24.scss | 2 +- .../icons/icons/arrow-up-circle/index.scss | 2 +- .../icons/arrow-up-circle/keyframes.scss | 2 +- .../icons/arrow-up-circle/placeholders.scss | 2 +- .../icons/arrow-up-circle/property-16.scss | 2 +- .../icons/arrow-up-circle/property-24.scss | 2 +- .../base/icons/icons/arrow-up-left/index.scss | 2 +- .../icons/icons/arrow-up-left/keyframes.scss | 2 +- .../icons/arrow-up-left/placeholders.scss | 2 +- .../icons/arrow-up-left/property-16.scss | 2 +- .../icons/arrow-up-left/property-24.scss | 2 +- .../icons/icons/arrow-up-right/index.scss | 2 +- .../icons/icons/arrow-up-right/keyframes.scss | 2 +- .../icons/arrow-up-right/placeholders.scss | 2 +- .../icons/arrow-up-right/property-16.scss | 2 +- .../icons/arrow-up-right/property-24.scss | 2 +- .../base/icons/icons/arrow-up/index.scss | 2 +- .../base/icons/icons/arrow-up/keyframes.scss | 2 +- .../icons/icons/arrow-up/placeholders.scss | 2 +- .../icons/icons/arrow-up/property-16.scss | 2 +- .../icons/icons/arrow-up/property-24.scss | 2 +- .../base/icons/icons/at-sign/index.scss | 2 +- .../base/icons/icons/at-sign/keyframes.scss | 2 +- .../icons/icons/at-sign/placeholders.scss | 2 +- .../base/icons/icons/at-sign/property-16.scss | 2 +- .../base/icons/icons/at-sign/property-24.scss | 2 +- .../base/icons/icons/auth0-color/index.scss | 2 +- .../icons/icons/auth0-color/keyframes.scss | 2 +- .../icons/icons/auth0-color/placeholders.scss | 2 +- .../icons/icons/auth0-color/property-16.scss | 2 +- .../icons/icons/auth0-color/property-24.scss | 2 +- .../styles/base/icons/icons/auth0/index.scss | 2 +- .../base/icons/icons/auth0/keyframes.scss | 2 +- .../base/icons/icons/auth0/placeholders.scss | 2 +- .../base/icons/icons/auth0/property-16.scss | 2 +- .../base/icons/icons/auth0/property-24.scss | 2 +- .../base/icons/icons/auto-apply/index.scss | 2 +- .../icons/icons/auto-apply/keyframes.scss | 2 +- .../icons/icons/auto-apply/placeholders.scss | 2 +- .../icons/icons/auto-apply/property-16.scss | 2 +- .../icons/icons/auto-apply/property-24.scss | 2 +- .../styles/base/icons/icons/award/index.scss | 2 +- .../base/icons/icons/award/keyframes.scss | 2 +- .../base/icons/icons/award/placeholders.scss | 2 +- .../base/icons/icons/award/property-16.scss | 2 +- .../base/icons/icons/award/property-24.scss | 2 +- .../base/icons/icons/azure-color/index.scss | 2 +- .../icons/icons/azure-color/keyframes.scss | 2 +- .../icons/icons/azure-color/placeholders.scss | 2 +- .../icons/icons/azure-color/property-16.scss | 2 +- .../icons/icons/azure-color/property-24.scss | 2 +- .../icons/icons/azure-devops-color/index.scss | 2 +- .../icons/azure-devops-color/keyframes.scss | 2 +- .../azure-devops-color/placeholders.scss | 2 +- .../icons/azure-devops-color/property-16.scss | 2 +- 
.../icons/azure-devops-color/property-24.scss | 2 +- .../base/icons/icons/azure-devops/index.scss | 2 +- .../icons/icons/azure-devops/keyframes.scss | 2 +- .../icons/azure-devops/placeholders.scss | 2 +- .../icons/icons/azure-devops/property-16.scss | 2 +- .../icons/icons/azure-devops/property-24.scss | 2 +- .../styles/base/icons/icons/azure/index.scss | 2 +- .../base/icons/icons/azure/keyframes.scss | 2 +- .../base/icons/icons/azure/placeholders.scss | 2 +- .../base/icons/icons/azure/property-16.scss | 2 +- .../base/icons/icons/azure/property-24.scss | 2 +- .../base/icons/icons/bank-vault/index.scss | 2 +- .../icons/icons/bank-vault/keyframes.scss | 2 +- .../icons/icons/bank-vault/placeholders.scss | 2 +- .../icons/icons/bank-vault/property-16.scss | 2 +- .../icons/icons/bank-vault/property-24.scss | 2 +- .../base/icons/icons/bar-chart-alt/index.scss | 2 +- .../icons/icons/bar-chart-alt/keyframes.scss | 2 +- .../icons/bar-chart-alt/placeholders.scss | 2 +- .../icons/bar-chart-alt/property-16.scss | 2 +- .../icons/bar-chart-alt/property-24.scss | 2 +- .../base/icons/icons/bar-chart/index.scss | 2 +- .../base/icons/icons/bar-chart/keyframes.scss | 2 +- .../icons/icons/bar-chart/placeholders.scss | 2 +- .../icons/icons/bar-chart/property-16.scss | 2 +- .../icons/icons/bar-chart/property-24.scss | 2 +- .../icons/icons/battery-charging/index.scss | 2 +- .../icons/battery-charging/keyframes.scss | 2 +- .../icons/battery-charging/placeholders.scss | 2 +- .../icons/battery-charging/property-16.scss | 2 +- .../icons/battery-charging/property-24.scss | 2 +- .../base/icons/icons/battery/index.scss | 2 +- .../base/icons/icons/battery/keyframes.scss | 2 +- .../icons/icons/battery/placeholders.scss | 2 +- .../base/icons/icons/battery/property-16.scss | 2 +- .../base/icons/icons/battery/property-24.scss | 2 +- .../styles/base/icons/icons/beaker/index.scss | 2 +- .../base/icons/icons/beaker/keyframes.scss | 2 +- .../base/icons/icons/beaker/placeholders.scss | 2 +- .../base/icons/icons/beaker/property-16.scss | 2 +- .../base/icons/icons/beaker/property-24.scss | 2 +- .../icons/icons/bell-active-fill/index.scss | 2 +- .../icons/bell-active-fill/keyframes.scss | 2 +- .../icons/bell-active-fill/placeholders.scss | 2 +- .../icons/bell-active-fill/property-16.scss | 2 +- .../icons/bell-active-fill/property-24.scss | 2 +- .../base/icons/icons/bell-active/index.scss | 2 +- .../icons/icons/bell-active/keyframes.scss | 2 +- .../icons/icons/bell-active/placeholders.scss | 2 +- .../icons/icons/bell-active/property-16.scss | 2 +- .../icons/icons/bell-active/property-24.scss | 2 +- .../base/icons/icons/bell-off/index.scss | 2 +- .../base/icons/icons/bell-off/keyframes.scss | 2 +- .../icons/icons/bell-off/placeholders.scss | 2 +- .../icons/icons/bell-off/property-16.scss | 2 +- .../icons/icons/bell-off/property-24.scss | 2 +- .../styles/base/icons/icons/bell/index.scss | 2 +- .../base/icons/icons/bell/keyframes.scss | 2 +- .../base/icons/icons/bell/placeholders.scss | 2 +- .../base/icons/icons/bell/property-16.scss | 2 +- .../base/icons/icons/bell/property-24.scss | 2 +- .../icons/icons/bitbucket-color/index.scss | 2 +- .../icons/bitbucket-color/keyframes.scss | 2 +- .../icons/bitbucket-color/placeholders.scss | 2 +- .../icons/bitbucket-color/property-16.scss | 2 +- .../icons/bitbucket-color/property-24.scss | 2 +- .../base/icons/icons/bitbucket/index.scss | 2 +- .../base/icons/icons/bitbucket/keyframes.scss | 2 +- .../icons/icons/bitbucket/placeholders.scss | 2 +- .../icons/icons/bitbucket/property-16.scss | 2 +- 
.../icons/icons/bitbucket/property-24.scss | 2 +- .../styles/base/icons/icons/bolt/index.scss | 2 +- .../base/icons/icons/bolt/keyframes.scss | 2 +- .../base/icons/icons/bolt/placeholders.scss | 2 +- .../icons/icons/bookmark-add-fill/index.scss | 2 +- .../icons/bookmark-add-fill/keyframes.scss | 2 +- .../icons/bookmark-add-fill/placeholders.scss | 2 +- .../icons/bookmark-add-fill/property-16.scss | 2 +- .../icons/bookmark-add-fill/property-24.scss | 2 +- .../base/icons/icons/bookmark-add/index.scss | 2 +- .../icons/icons/bookmark-add/keyframes.scss | 2 +- .../icons/bookmark-add/placeholders.scss | 2 +- .../icons/icons/bookmark-add/property-16.scss | 2 +- .../icons/icons/bookmark-add/property-24.scss | 2 +- .../base/icons/icons/bookmark-fill/index.scss | 2 +- .../icons/icons/bookmark-fill/keyframes.scss | 2 +- .../icons/bookmark-fill/placeholders.scss | 2 +- .../icons/bookmark-fill/property-16.scss | 2 +- .../icons/bookmark-fill/property-24.scss | 2 +- .../icons/bookmark-remove-fill/index.scss | 2 +- .../icons/bookmark-remove-fill/keyframes.scss | 2 +- .../bookmark-remove-fill/placeholders.scss | 2 +- .../bookmark-remove-fill/property-16.scss | 2 +- .../bookmark-remove-fill/property-24.scss | 2 +- .../icons/icons/bookmark-remove/index.scss | 2 +- .../icons/bookmark-remove/keyframes.scss | 2 +- .../icons/bookmark-remove/placeholders.scss | 2 +- .../icons/bookmark-remove/property-16.scss | 2 +- .../icons/bookmark-remove/property-24.scss | 2 +- .../base/icons/icons/bookmark/index.scss | 2 +- .../base/icons/icons/bookmark/keyframes.scss | 2 +- .../icons/icons/bookmark/placeholders.scss | 2 +- .../icons/icons/bookmark/property-16.scss | 2 +- .../icons/icons/bookmark/property-24.scss | 2 +- .../styles/base/icons/icons/bottom/index.scss | 2 +- .../base/icons/icons/bottom/keyframes.scss | 2 +- .../base/icons/icons/bottom/placeholders.scss | 2 +- .../base/icons/icons/bottom/property-16.scss | 2 +- .../base/icons/icons/bottom/property-24.scss | 2 +- .../icons/icons/boundary-color/index.scss | 2 +- .../icons/icons/boundary-color/keyframes.scss | 2 +- .../icons/boundary-color/placeholders.scss | 2 +- .../icons/boundary-color/property-16.scss | 2 +- .../icons/boundary-color/property-24.scss | 2 +- .../base/icons/icons/boundary/index.scss | 2 +- .../base/icons/icons/boundary/keyframes.scss | 2 +- .../icons/icons/boundary/placeholders.scss | 2 +- .../icons/icons/boundary/property-16.scss | 2 +- .../icons/icons/boundary/property-24.scss | 2 +- .../icons/icons/box-check-fill/index.scss | 2 +- .../icons/icons/box-check-fill/keyframes.scss | 2 +- .../icons/box-check-fill/placeholders.scss | 2 +- .../base/icons/icons/box-outline/index.scss | 2 +- .../icons/icons/box-outline/keyframes.scss | 2 +- .../icons/icons/box-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/box/index.scss | 2 +- .../base/icons/icons/box/keyframes.scss | 2 +- .../base/icons/icons/box/placeholders.scss | 2 +- .../base/icons/icons/box/property-16.scss | 2 +- .../base/icons/icons/box/property-24.scss | 2 +- .../base/icons/icons/briefcase/index.scss | 2 +- .../base/icons/icons/briefcase/keyframes.scss | 2 +- .../icons/icons/briefcase/placeholders.scss | 2 +- .../icons/icons/briefcase/property-16.scss | 2 +- .../icons/icons/briefcase/property-24.scss | 2 +- .../base/icons/icons/broadcast/index.scss | 2 +- .../base/icons/icons/broadcast/keyframes.scss | 2 +- .../icons/icons/broadcast/placeholders.scss | 2 +- .../styles/base/icons/icons/bug/index.scss | 2 +- .../base/icons/icons/bug/keyframes.scss | 2 +- 
.../base/icons/icons/bug/placeholders.scss | 2 +- .../base/icons/icons/bug/property-16.scss | 2 +- .../base/icons/icons/bug/property-24.scss | 2 +- .../styles/base/icons/icons/build/index.scss | 2 +- .../base/icons/icons/build/keyframes.scss | 2 +- .../base/icons/icons/build/placeholders.scss | 2 +- .../base/icons/icons/build/property-16.scss | 2 +- .../base/icons/icons/build/property-24.scss | 2 +- .../styles/base/icons/icons/bulb/index.scss | 2 +- .../base/icons/icons/bulb/keyframes.scss | 2 +- .../base/icons/icons/bulb/placeholders.scss | 2 +- .../base/icons/icons/bulb/property-16.scss | 2 +- .../base/icons/icons/bulb/property-24.scss | 2 +- .../base/icons/icons/calendar/index.scss | 2 +- .../base/icons/icons/calendar/keyframes.scss | 2 +- .../icons/icons/calendar/placeholders.scss | 2 +- .../icons/icons/calendar/property-16.scss | 2 +- .../icons/icons/calendar/property-24.scss | 2 +- .../base/icons/icons/camera-off/index.scss | 2 +- .../icons/icons/camera-off/keyframes.scss | 2 +- .../icons/icons/camera-off/placeholders.scss | 2 +- .../icons/icons/camera-off/property-16.scss | 2 +- .../icons/icons/camera-off/property-24.scss | 2 +- .../styles/base/icons/icons/camera/index.scss | 2 +- .../base/icons/icons/camera/keyframes.scss | 2 +- .../base/icons/icons/camera/placeholders.scss | 2 +- .../base/icons/icons/camera/property-16.scss | 2 +- .../base/icons/icons/camera/property-24.scss | 2 +- .../icons/icons/cancel-circle-fill/index.scss | 2 +- .../icons/cancel-circle-fill/keyframes.scss | 2 +- .../cancel-circle-fill/placeholders.scss | 2 +- .../icons/cancel-circle-outline/index.scss | 2 +- .../cancel-circle-outline/keyframes.scss | 2 +- .../cancel-circle-outline/placeholders.scss | 2 +- .../base/icons/icons/cancel-plain/index.scss | 2 +- .../icons/icons/cancel-plain/keyframes.scss | 2 +- .../icons/cancel-plain/placeholders.scss | 2 +- .../icons/icons/cancel-square-fill/index.scss | 2 +- .../icons/cancel-square-fill/keyframes.scss | 2 +- .../cancel-square-fill/placeholders.scss | 2 +- .../icons/cancel-square-outline/index.scss | 2 +- .../cancel-square-outline/keyframes.scss | 2 +- .../cancel-square-outline/placeholders.scss | 2 +- .../base/icons/icons/caret-down/index.scss | 2 +- .../icons/icons/caret-down/keyframes.scss | 2 +- .../icons/icons/caret-down/placeholders.scss | 2 +- .../base/icons/icons/caret-up/index.scss | 2 +- .../base/icons/icons/caret-up/keyframes.scss | 2 +- .../icons/icons/caret-up/placeholders.scss | 2 +- .../styles/base/icons/icons/caret/index.scss | 2 +- .../base/icons/icons/caret/keyframes.scss | 2 +- .../base/icons/icons/caret/placeholders.scss | 2 +- .../base/icons/icons/caret/property-16.scss | 2 +- .../base/icons/icons/caret/property-24.scss | 2 +- .../styles/base/icons/icons/cast/index.scss | 2 +- .../base/icons/icons/cast/keyframes.scss | 2 +- .../base/icons/icons/cast/placeholders.scss | 2 +- .../base/icons/icons/cast/property-16.scss | 2 +- .../base/icons/icons/cast/property-24.scss | 2 +- .../base/icons/icons/certificate/index.scss | 2 +- .../icons/icons/certificate/keyframes.scss | 2 +- .../icons/icons/certificate/placeholders.scss | 2 +- .../icons/icons/certificate/property-16.scss | 2 +- .../icons/icons/certificate/property-24.scss | 2 +- .../base/icons/icons/change-circle/index.scss | 2 +- .../icons/icons/change-circle/keyframes.scss | 2 +- .../icons/change-circle/placeholders.scss | 2 +- .../icons/change-circle/property-16.scss | 2 +- .../icons/change-circle/property-24.scss | 2 +- .../base/icons/icons/change-square/index.scss | 2 +- 
.../icons/icons/change-square/keyframes.scss | 2 +- .../icons/change-square/placeholders.scss | 2 +- .../icons/change-square/property-16.scss | 2 +- .../icons/change-square/property-24.scss | 2 +- .../styles/base/icons/icons/change/index.scss | 2 +- .../base/icons/icons/change/keyframes.scss | 2 +- .../base/icons/icons/change/placeholders.scss | 2 +- .../base/icons/icons/change/property-16.scss | 2 +- .../base/icons/icons/change/property-24.scss | 2 +- .../icons/icons/check-circle-fill/index.scss | 2 +- .../icons/check-circle-fill/keyframes.scss | 2 +- .../icons/check-circle-fill/placeholders.scss | 2 +- .../icons/check-circle-fill/property-16.scss | 2 +- .../icons/check-circle-fill/property-24.scss | 2 +- .../icons/check-circle-outline/index.scss | 2 +- .../icons/check-circle-outline/keyframes.scss | 2 +- .../check-circle-outline/placeholders.scss | 2 +- .../base/icons/icons/check-circle/index.scss | 2 +- .../icons/icons/check-circle/keyframes.scss | 2 +- .../icons/check-circle/placeholders.scss | 2 +- .../icons/icons/check-circle/property-16.scss | 2 +- .../icons/icons/check-circle/property-24.scss | 2 +- .../icons/icons/check-diamond-fill/index.scss | 2 +- .../icons/check-diamond-fill/keyframes.scss | 2 +- .../check-diamond-fill/placeholders.scss | 2 +- .../icons/check-diamond-fill/property-16.scss | 2 +- .../icons/check-diamond-fill/property-24.scss | 2 +- .../base/icons/icons/check-diamond/index.scss | 2 +- .../icons/icons/check-diamond/keyframes.scss | 2 +- .../icons/check-diamond/placeholders.scss | 2 +- .../icons/check-diamond/property-16.scss | 2 +- .../icons/check-diamond/property-24.scss | 2 +- .../icons/icons/check-hexagon-fill/index.scss | 2 +- .../icons/check-hexagon-fill/keyframes.scss | 2 +- .../check-hexagon-fill/placeholders.scss | 2 +- .../icons/check-hexagon-fill/property-16.scss | 2 +- .../icons/check-hexagon-fill/property-24.scss | 2 +- .../base/icons/icons/check-hexagon/index.scss | 2 +- .../icons/icons/check-hexagon/keyframes.scss | 2 +- .../icons/check-hexagon/placeholders.scss | 2 +- .../icons/check-hexagon/property-16.scss | 2 +- .../icons/check-hexagon/property-24.scss | 2 +- .../base/icons/icons/check-plain/index.scss | 2 +- .../icons/icons/check-plain/keyframes.scss | 2 +- .../icons/icons/check-plain/placeholders.scss | 2 +- .../icons/icons/check-square-fill/index.scss | 2 +- .../icons/check-square-fill/keyframes.scss | 2 +- .../icons/check-square-fill/placeholders.scss | 2 +- .../icons/check-square-fill/property-16.scss | 2 +- .../icons/check-square-fill/property-24.scss | 2 +- .../base/icons/icons/check-square/index.scss | 2 +- .../icons/icons/check-square/keyframes.scss | 2 +- .../icons/check-square/placeholders.scss | 2 +- .../icons/icons/check-square/property-16.scss | 2 +- .../icons/icons/check-square/property-24.scss | 2 +- .../styles/base/icons/icons/check/index.scss | 2 +- .../base/icons/icons/check/keyframes.scss | 2 +- .../base/icons/icons/check/placeholders.scss | 2 +- .../base/icons/icons/check/property-16.scss | 2 +- .../base/icons/icons/check/property-24.scss | 2 +- .../base/icons/icons/chevron-down/index.scss | 2 +- .../icons/icons/chevron-down/keyframes.scss | 2 +- .../icons/chevron-down/placeholders.scss | 2 +- .../icons/icons/chevron-down/property-16.scss | 2 +- .../icons/icons/chevron-down/property-24.scss | 2 +- .../base/icons/icons/chevron-left/index.scss | 2 +- .../icons/icons/chevron-left/keyframes.scss | 2 +- .../icons/chevron-left/placeholders.scss | 2 +- .../icons/icons/chevron-left/property-16.scss | 2 +- 
.../icons/icons/chevron-left/property-24.scss | 2 +- .../base/icons/icons/chevron-right/index.scss | 2 +- .../icons/icons/chevron-right/keyframes.scss | 2 +- .../icons/chevron-right/placeholders.scss | 2 +- .../icons/chevron-right/property-16.scss | 2 +- .../icons/chevron-right/property-24.scss | 2 +- .../base/icons/icons/chevron-up/index.scss | 2 +- .../icons/icons/chevron-up/keyframes.scss | 2 +- .../icons/icons/chevron-up/placeholders.scss | 2 +- .../icons/icons/chevron-up/property-16.scss | 2 +- .../icons/icons/chevron-up/property-24.scss | 2 +- .../base/icons/icons/chevrons-down/index.scss | 2 +- .../icons/icons/chevrons-down/keyframes.scss | 2 +- .../icons/chevrons-down/placeholders.scss | 2 +- .../icons/chevrons-down/property-16.scss | 2 +- .../icons/chevrons-down/property-24.scss | 2 +- .../base/icons/icons/chevrons-left/index.scss | 2 +- .../icons/icons/chevrons-left/keyframes.scss | 2 +- .../icons/chevrons-left/placeholders.scss | 2 +- .../icons/chevrons-left/property-16.scss | 2 +- .../icons/chevrons-left/property-24.scss | 2 +- .../icons/icons/chevrons-right/index.scss | 2 +- .../icons/icons/chevrons-right/keyframes.scss | 2 +- .../icons/chevrons-right/placeholders.scss | 2 +- .../icons/chevrons-right/property-16.scss | 2 +- .../icons/chevrons-right/property-24.scss | 2 +- .../base/icons/icons/chevrons-up/index.scss | 2 +- .../icons/icons/chevrons-up/keyframes.scss | 2 +- .../icons/icons/chevrons-up/placeholders.scss | 2 +- .../icons/icons/chevrons-up/property-16.scss | 2 +- .../icons/icons/chevrons-up/property-24.scss | 2 +- .../base/icons/icons/circle-dot/index.scss | 2 +- .../icons/icons/circle-dot/keyframes.scss | 2 +- .../icons/icons/circle-dot/placeholders.scss | 2 +- .../icons/icons/circle-dot/property-16.scss | 2 +- .../icons/icons/circle-dot/property-24.scss | 2 +- .../base/icons/icons/circle-fill/index.scss | 2 +- .../icons/icons/circle-fill/keyframes.scss | 2 +- .../icons/icons/circle-fill/placeholders.scss | 2 +- .../icons/icons/circle-fill/property-16.scss | 2 +- .../icons/icons/circle-fill/property-24.scss | 2 +- .../base/icons/icons/circle-half/index.scss | 2 +- .../icons/icons/circle-half/keyframes.scss | 2 +- .../icons/icons/circle-half/placeholders.scss | 2 +- .../icons/icons/circle-half/property-16.scss | 2 +- .../icons/icons/circle-half/property-24.scss | 2 +- .../styles/base/icons/icons/circle/index.scss | 2 +- .../base/icons/icons/circle/keyframes.scss | 2 +- .../base/icons/icons/circle/placeholders.scss | 2 +- .../base/icons/icons/circle/property-16.scss | 2 +- .../base/icons/icons/circle/property-24.scss | 2 +- .../icons/icons/clipboard-checked/index.scss | 2 +- .../icons/clipboard-checked/keyframes.scss | 2 +- .../icons/clipboard-checked/placeholders.scss | 2 +- .../icons/clipboard-checked/property-16.scss | 2 +- .../icons/clipboard-checked/property-24.scss | 2 +- .../icons/icons/clipboard-copy/index.scss | 2 +- .../icons/icons/clipboard-copy/keyframes.scss | 2 +- .../icons/clipboard-copy/placeholders.scss | 2 +- .../icons/clipboard-copy/property-16.scss | 2 +- .../icons/clipboard-copy/property-24.scss | 2 +- .../base/icons/icons/clipboard/index.scss | 2 +- .../base/icons/icons/clipboard/keyframes.scss | 2 +- .../icons/icons/clipboard/placeholders.scss | 2 +- .../icons/icons/clipboard/property-16.scss | 2 +- .../icons/icons/clipboard/property-24.scss | 2 +- .../base/icons/icons/clock-fill/index.scss | 2 +- .../icons/icons/clock-fill/keyframes.scss | 2 +- .../icons/icons/clock-fill/placeholders.scss | 2 +- .../base/icons/icons/clock-outline/index.scss | 
2 +- .../icons/icons/clock-outline/keyframes.scss | 2 +- .../icons/clock-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/clock/index.scss | 2 +- .../base/icons/icons/clock/keyframes.scss | 2 +- .../base/icons/icons/clock/placeholders.scss | 2 +- .../base/icons/icons/clock/property-16.scss | 2 +- .../base/icons/icons/clock/property-24.scss | 2 +- .../base/icons/icons/cloud-check/index.scss | 2 +- .../icons/icons/cloud-check/keyframes.scss | 2 +- .../icons/icons/cloud-check/placeholders.scss | 2 +- .../icons/icons/cloud-check/property-16.scss | 2 +- .../icons/icons/cloud-check/property-24.scss | 2 +- .../base/icons/icons/cloud-cross/index.scss | 2 +- .../icons/icons/cloud-cross/keyframes.scss | 2 +- .../icons/icons/cloud-cross/placeholders.scss | 2 +- .../icons/icons/cloud-cross/property-16.scss | 2 +- .../icons/icons/cloud-cross/property-24.scss | 2 +- .../icons/icons/cloud-download/index.scss | 2 +- .../icons/icons/cloud-download/keyframes.scss | 2 +- .../icons/cloud-download/placeholders.scss | 2 +- .../icons/cloud-download/property-16.scss | 2 +- .../icons/cloud-download/property-24.scss | 2 +- .../icons/icons/cloud-lightning/index.scss | 2 +- .../icons/cloud-lightning/keyframes.scss | 2 +- .../icons/cloud-lightning/placeholders.scss | 2 +- .../icons/cloud-lightning/property-16.scss | 2 +- .../icons/cloud-lightning/property-24.scss | 2 +- .../base/icons/icons/cloud-lock/index.scss | 2 +- .../icons/icons/cloud-lock/keyframes.scss | 2 +- .../icons/icons/cloud-lock/placeholders.scss | 2 +- .../icons/icons/cloud-lock/property-16.scss | 2 +- .../icons/icons/cloud-lock/property-24.scss | 2 +- .../base/icons/icons/cloud-off/index.scss | 2 +- .../base/icons/icons/cloud-off/keyframes.scss | 2 +- .../icons/icons/cloud-off/placeholders.scss | 2 +- .../icons/icons/cloud-off/property-16.scss | 2 +- .../icons/icons/cloud-off/property-24.scss | 2 +- .../base/icons/icons/cloud-upload/index.scss | 2 +- .../icons/icons/cloud-upload/keyframes.scss | 2 +- .../icons/cloud-upload/placeholders.scss | 2 +- .../icons/icons/cloud-upload/property-16.scss | 2 +- .../icons/icons/cloud-upload/property-24.scss | 2 +- .../base/icons/icons/cloud-x/index.scss | 2 +- .../base/icons/icons/cloud-x/keyframes.scss | 2 +- .../icons/icons/cloud-x/placeholders.scss | 2 +- .../base/icons/icons/cloud-x/property-16.scss | 2 +- .../base/icons/icons/cloud-x/property-24.scss | 2 +- .../styles/base/icons/icons/cloud/index.scss | 2 +- .../base/icons/icons/cloud/keyframes.scss | 2 +- .../base/icons/icons/cloud/placeholders.scss | 2 +- .../base/icons/icons/cloud/property-16.scss | 2 +- .../base/icons/icons/cloud/property-24.scss | 2 +- .../styles/base/icons/icons/code/index.scss | 2 +- .../base/icons/icons/code/keyframes.scss | 2 +- .../base/icons/icons/code/placeholders.scss | 2 +- .../base/icons/icons/code/property-16.scss | 2 +- .../base/icons/icons/code/property-24.scss | 2 +- .../base/icons/icons/codepen-color/index.scss | 2 +- .../icons/icons/codepen-color/keyframes.scss | 2 +- .../icons/codepen-color/placeholders.scss | 2 +- .../icons/codepen-color/property-16.scss | 2 +- .../icons/codepen-color/property-24.scss | 2 +- .../base/icons/icons/codepen/index.scss | 2 +- .../base/icons/icons/codepen/keyframes.scss | 2 +- .../icons/icons/codepen/placeholders.scss | 2 +- .../base/icons/icons/codepen/property-16.scss | 2 +- .../base/icons/icons/codepen/property-24.scss | 2 +- .../base/icons/icons/collections/index.scss | 2 +- .../icons/icons/collections/keyframes.scss | 2 +- .../icons/icons/collections/placeholders.scss | 2 +- 
.../icons/icons/collections/property-16.scss | 2 +- .../icons/icons/collections/property-24.scss | 2 +- .../base/icons/icons/command/index.scss | 2 +- .../base/icons/icons/command/keyframes.scss | 2 +- .../icons/icons/command/placeholders.scss | 2 +- .../base/icons/icons/command/property-16.scss | 2 +- .../base/icons/icons/command/property-24.scss | 2 +- .../base/icons/icons/compass/index.scss | 2 +- .../base/icons/icons/compass/keyframes.scss | 2 +- .../icons/icons/compass/placeholders.scss | 2 +- .../base/icons/icons/compass/property-16.scss | 2 +- .../base/icons/icons/compass/property-24.scss | 2 +- .../icons/icons/connection-gateway/index.scss | 2 +- .../icons/connection-gateway/keyframes.scss | 2 +- .../connection-gateway/placeholders.scss | 2 +- .../icons/connection-gateway/property-16.scss | 2 +- .../icons/connection-gateway/property-24.scss | 2 +- .../base/icons/icons/connection/index.scss | 2 +- .../icons/icons/connection/keyframes.scss | 2 +- .../icons/icons/connection/placeholders.scss | 2 +- .../icons/icons/connection/property-16.scss | 2 +- .../icons/icons/connection/property-24.scss | 2 +- .../base/icons/icons/console/index.scss | 2 +- .../base/icons/icons/console/keyframes.scss | 2 +- .../icons/icons/console/placeholders.scss | 2 +- .../base/icons/icons/copy-action/index.scss | 2 +- .../icons/icons/copy-action/keyframes.scss | 2 +- .../icons/icons/copy-action/placeholders.scss | 2 +- .../base/icons/icons/copy-success/index.scss | 2 +- .../icons/icons/copy-success/keyframes.scss | 2 +- .../icons/copy-success/placeholders.scss | 2 +- .../icons/icons/corner-down-left/index.scss | 2 +- .../icons/corner-down-left/keyframes.scss | 2 +- .../icons/corner-down-left/placeholders.scss | 2 +- .../icons/corner-down-left/property-16.scss | 2 +- .../icons/corner-down-left/property-24.scss | 2 +- .../icons/icons/corner-down-right/index.scss | 2 +- .../icons/corner-down-right/keyframes.scss | 2 +- .../icons/corner-down-right/placeholders.scss | 2 +- .../icons/corner-down-right/property-16.scss | 2 +- .../icons/corner-down-right/property-24.scss | 2 +- .../icons/icons/corner-left-down/index.scss | 2 +- .../icons/corner-left-down/keyframes.scss | 2 +- .../icons/corner-left-down/placeholders.scss | 2 +- .../icons/corner-left-down/property-16.scss | 2 +- .../icons/corner-left-down/property-24.scss | 2 +- .../icons/icons/corner-left-up/index.scss | 2 +- .../icons/icons/corner-left-up/keyframes.scss | 2 +- .../icons/corner-left-up/placeholders.scss | 2 +- .../icons/corner-left-up/property-16.scss | 2 +- .../icons/corner-left-up/property-24.scss | 2 +- .../icons/icons/corner-right-down/index.scss | 2 +- .../icons/corner-right-down/keyframes.scss | 2 +- .../icons/corner-right-down/placeholders.scss | 2 +- .../icons/corner-right-down/property-16.scss | 2 +- .../icons/corner-right-down/property-24.scss | 2 +- .../icons/icons/corner-right-up/index.scss | 2 +- .../icons/corner-right-up/keyframes.scss | 2 +- .../icons/corner-right-up/placeholders.scss | 2 +- .../icons/corner-right-up/property-16.scss | 2 +- .../icons/corner-right-up/property-24.scss | 2 +- .../icons/icons/corner-up-left/index.scss | 2 +- .../icons/icons/corner-up-left/keyframes.scss | 2 +- .../icons/corner-up-left/placeholders.scss | 2 +- .../icons/corner-up-left/property-16.scss | 2 +- .../icons/corner-up-left/property-24.scss | 2 +- .../icons/icons/corner-up-right/index.scss | 2 +- .../icons/corner-up-right/keyframes.scss | 2 +- .../icons/corner-up-right/placeholders.scss | 2 +- .../icons/corner-up-right/property-16.scss | 2 +- 
.../icons/corner-up-right/property-24.scss | 2 +- .../styles/base/icons/icons/cpu/index.scss | 2 +- .../base/icons/icons/cpu/keyframes.scss | 2 +- .../base/icons/icons/cpu/placeholders.scss | 2 +- .../base/icons/icons/cpu/property-16.scss | 2 +- .../base/icons/icons/cpu/property-24.scss | 2 +- .../base/icons/icons/credit-card/index.scss | 2 +- .../icons/icons/credit-card/keyframes.scss | 2 +- .../icons/icons/credit-card/placeholders.scss | 2 +- .../icons/icons/credit-card/property-16.scss | 2 +- .../icons/icons/credit-card/property-24.scss | 2 +- .../styles/base/icons/icons/crop/index.scss | 2 +- .../base/icons/icons/crop/keyframes.scss | 2 +- .../base/icons/icons/crop/placeholders.scss | 2 +- .../base/icons/icons/crop/property-16.scss | 2 +- .../base/icons/icons/crop/property-24.scss | 2 +- .../base/icons/icons/crosshair/index.scss | 2 +- .../base/icons/icons/crosshair/keyframes.scss | 2 +- .../icons/icons/crosshair/placeholders.scss | 2 +- .../icons/icons/crosshair/property-16.scss | 2 +- .../icons/icons/crosshair/property-24.scss | 2 +- .../base/icons/icons/dashboard/index.scss | 2 +- .../base/icons/icons/dashboard/keyframes.scss | 2 +- .../icons/icons/dashboard/placeholders.scss | 2 +- .../icons/icons/dashboard/property-16.scss | 2 +- .../icons/icons/dashboard/property-24.scss | 2 +- .../base/icons/icons/database/index.scss | 2 +- .../base/icons/icons/database/keyframes.scss | 2 +- .../icons/icons/database/placeholders.scss | 2 +- .../icons/icons/database/property-16.scss | 2 +- .../icons/icons/database/property-24.scss | 2 +- .../styles/base/icons/icons/delay/index.scss | 2 +- .../base/icons/icons/delay/keyframes.scss | 2 +- .../base/icons/icons/delay/placeholders.scss | 2 +- .../base/icons/icons/delay/property-16.scss | 2 +- .../base/icons/icons/delay/property-24.scss | 2 +- .../styles/base/icons/icons/delete/index.scss | 2 +- .../base/icons/icons/delete/keyframes.scss | 2 +- .../base/icons/icons/delete/placeholders.scss | 2 +- .../base/icons/icons/delete/property-16.scss | 2 +- .../base/icons/icons/delete/property-24.scss | 2 +- .../base/icons/icons/deny-alt/index.scss | 2 +- .../base/icons/icons/deny-alt/keyframes.scss | 2 +- .../icons/icons/deny-alt/placeholders.scss | 2 +- .../base/icons/icons/deny-color/index.scss | 2 +- .../icons/icons/deny-color/keyframes.scss | 2 +- .../icons/icons/deny-color/placeholders.scss | 2 +- .../icons/icons/deny-color/property-16.scss | 2 +- .../icons/icons/deny-color/property-24.scss | 2 +- .../base/icons/icons/deny-default/index.scss | 2 +- .../icons/icons/deny-default/keyframes.scss | 2 +- .../icons/deny-default/placeholders.scss | 2 +- .../base/icons/icons/diamond-fill/index.scss | 2 +- .../icons/icons/diamond-fill/keyframes.scss | 2 +- .../icons/diamond-fill/placeholders.scss | 2 +- .../icons/icons/diamond-fill/property-16.scss | 2 +- .../icons/icons/diamond-fill/property-24.scss | 2 +- .../base/icons/icons/diamond/index.scss | 2 +- .../base/icons/icons/diamond/keyframes.scss | 2 +- .../icons/icons/diamond/placeholders.scss | 2 +- .../base/icons/icons/diamond/property-16.scss | 2 +- .../base/icons/icons/diamond/property-24.scss | 2 +- .../base/icons/icons/disabled/index.scss | 2 +- .../base/icons/icons/disabled/keyframes.scss | 2 +- .../icons/icons/disabled/placeholders.scss | 2 +- .../styles/base/icons/icons/disc/index.scss | 2 +- .../base/icons/icons/disc/keyframes.scss | 2 +- .../base/icons/icons/disc/placeholders.scss | 2 +- .../base/icons/icons/disc/property-16.scss | 2 +- .../base/icons/icons/disc/property-24.scss | 2 +- 
.../icons/icons/discussion-circle/index.scss | 2 +- .../icons/discussion-circle/keyframes.scss | 2 +- .../icons/discussion-circle/placeholders.scss | 2 +- .../icons/discussion-circle/property-16.scss | 2 +- .../icons/discussion-circle/property-24.scss | 2 +- .../icons/icons/discussion-square/index.scss | 2 +- .../icons/discussion-square/keyframes.scss | 2 +- .../icons/discussion-square/placeholders.scss | 2 +- .../icons/discussion-square/property-16.scss | 2 +- .../icons/discussion-square/property-24.scss | 2 +- .../base/icons/icons/docker-color/index.scss | 2 +- .../icons/icons/docker-color/keyframes.scss | 2 +- .../icons/docker-color/placeholders.scss | 2 +- .../icons/icons/docker-color/property-16.scss | 2 +- .../icons/icons/docker-color/property-24.scss | 2 +- .../styles/base/icons/icons/docker/index.scss | 2 +- .../base/icons/icons/docker/keyframes.scss | 2 +- .../base/icons/icons/docker/placeholders.scss | 2 +- .../base/icons/icons/docker/property-16.scss | 2 +- .../base/icons/icons/docker/property-24.scss | 2 +- .../base/icons/icons/docs-download/index.scss | 2 +- .../icons/icons/docs-download/keyframes.scss | 2 +- .../icons/docs-download/placeholders.scss | 2 +- .../icons/docs-download/property-16.scss | 2 +- .../icons/docs-download/property-24.scss | 2 +- .../base/icons/icons/docs-link/index.scss | 2 +- .../base/icons/icons/docs-link/keyframes.scss | 2 +- .../icons/icons/docs-link/placeholders.scss | 2 +- .../icons/icons/docs-link/property-16.scss | 2 +- .../icons/icons/docs-link/property-24.scss | 2 +- .../styles/base/icons/icons/docs/index.scss | 2 +- .../base/icons/icons/docs/keyframes.scss | 2 +- .../base/icons/icons/docs/placeholders.scss | 2 +- .../base/icons/icons/docs/property-16.scss | 2 +- .../base/icons/icons/docs/property-24.scss | 2 +- .../base/icons/icons/dollar-sign/index.scss | 2 +- .../icons/icons/dollar-sign/keyframes.scss | 2 +- .../icons/icons/dollar-sign/placeholders.scss | 2 +- .../icons/icons/dollar-sign/property-16.scss | 2 +- .../icons/icons/dollar-sign/property-24.scss | 2 +- .../base/icons/icons/dot-half/index.scss | 2 +- .../base/icons/icons/dot-half/keyframes.scss | 2 +- .../icons/icons/dot-half/placeholders.scss | 2 +- .../icons/icons/dot-half/property-16.scss | 2 +- .../icons/icons/dot-half/property-24.scss | 2 +- .../styles/base/icons/icons/dot/index.scss | 2 +- .../base/icons/icons/dot/keyframes.scss | 2 +- .../base/icons/icons/dot/placeholders.scss | 2 +- .../base/icons/icons/dot/property-16.scss | 2 +- .../base/icons/icons/dot/property-24.scss | 2 +- .../base/icons/icons/download/index.scss | 2 +- .../base/icons/icons/download/keyframes.scss | 2 +- .../icons/icons/download/placeholders.scss | 2 +- .../icons/icons/download/property-16.scss | 2 +- .../icons/icons/download/property-24.scss | 2 +- .../base/icons/icons/droplet/index.scss | 2 +- .../base/icons/icons/droplet/keyframes.scss | 2 +- .../icons/icons/droplet/placeholders.scss | 2 +- .../base/icons/icons/droplet/property-16.scss | 2 +- .../base/icons/icons/droplet/property-24.scss | 2 +- .../base/icons/icons/duplicate/index.scss | 2 +- .../base/icons/icons/duplicate/keyframes.scss | 2 +- .../icons/icons/duplicate/placeholders.scss | 2 +- .../icons/icons/duplicate/property-16.scss | 2 +- .../icons/icons/duplicate/property-24.scss | 2 +- .../styles/base/icons/icons/edit/index.scss | 2 +- .../base/icons/icons/edit/keyframes.scss | 2 +- .../base/icons/icons/edit/placeholders.scss | 2 +- .../base/icons/icons/edit/property-16.scss | 2 +- .../base/icons/icons/edit/property-24.scss | 2 +- 
.../base/icons/icons/enterprise/index.scss | 2 +- .../icons/icons/enterprise/keyframes.scss | 2 +- .../icons/icons/enterprise/placeholders.scss | 2 +- .../icons/icons/enterprise/property-16.scss | 2 +- .../icons/icons/enterprise/property-24.scss | 2 +- .../base/icons/icons/entry-point/index.scss | 2 +- .../icons/icons/entry-point/keyframes.scss | 2 +- .../icons/icons/entry-point/placeholders.scss | 2 +- .../icons/icons/entry-point/property-16.scss | 2 +- .../icons/icons/entry-point/property-24.scss | 2 +- .../icons/envelope-sealed-fill/index.scss | 2 +- .../icons/envelope-sealed-fill/keyframes.scss | 2 +- .../envelope-sealed-fill/placeholders.scss | 2 +- .../icons/envelope-sealed-outline/index.scss | 2 +- .../envelope-sealed-outline/keyframes.scss | 2 +- .../envelope-sealed-outline/placeholders.scss | 2 +- .../envelope-unsealed--outline/index.scss | 2 +- .../envelope-unsealed--outline/keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../icons/envelope-unsealed-fill/index.scss | 2 +- .../envelope-unsealed-fill/keyframes.scss | 2 +- .../envelope-unsealed-fill/placeholders.scss | 2 +- .../styles/base/icons/icons/event/index.scss | 2 +- .../base/icons/icons/event/keyframes.scss | 2 +- .../base/icons/icons/event/placeholders.scss | 2 +- .../base/icons/icons/event/property-16.scss | 2 +- .../base/icons/icons/event/property-24.scss | 2 +- .../base/icons/icons/exit-point/index.scss | 2 +- .../icons/icons/exit-point/keyframes.scss | 2 +- .../icons/icons/exit-point/placeholders.scss | 2 +- .../icons/icons/exit-point/property-16.scss | 2 +- .../icons/icons/exit-point/property-24.scss | 2 +- .../styles/base/icons/icons/exit/index.scss | 2 +- .../base/icons/icons/exit/keyframes.scss | 2 +- .../base/icons/icons/exit/placeholders.scss | 2 +- .../base/icons/icons/expand-less/index.scss | 2 +- .../icons/icons/expand-less/keyframes.scss | 2 +- .../icons/icons/expand-less/placeholders.scss | 2 +- .../base/icons/icons/expand-more/index.scss | 2 +- .../icons/icons/expand-more/keyframes.scss | 2 +- .../icons/icons/expand-more/placeholders.scss | 2 +- .../base/icons/icons/external-link/index.scss | 2 +- .../icons/icons/external-link/keyframes.scss | 2 +- .../icons/external-link/placeholders.scss | 2 +- .../icons/external-link/property-16.scss | 2 +- .../icons/external-link/property-24.scss | 2 +- .../base/icons/icons/eye-off/index.scss | 2 +- .../base/icons/icons/eye-off/keyframes.scss | 2 +- .../icons/icons/eye-off/placeholders.scss | 2 +- .../base/icons/icons/eye-off/property-16.scss | 2 +- .../base/icons/icons/eye-off/property-24.scss | 2 +- .../styles/base/icons/icons/eye/index.scss | 2 +- .../base/icons/icons/eye/keyframes.scss | 2 +- .../base/icons/icons/eye/placeholders.scss | 2 +- .../base/icons/icons/eye/property-16.scss | 2 +- .../base/icons/icons/eye/property-24.scss | 2 +- .../base/icons/icons/f5-color/index.scss | 2 +- .../base/icons/icons/f5-color/keyframes.scss | 2 +- .../icons/icons/f5-color/placeholders.scss | 2 +- .../icons/icons/f5-color/property-16.scss | 2 +- .../icons/icons/f5-color/property-24.scss | 2 +- .../app/styles/base/icons/icons/f5/index.scss | 2 +- .../styles/base/icons/icons/f5/keyframes.scss | 2 +- .../base/icons/icons/f5/placeholders.scss | 2 +- .../base/icons/icons/f5/property-16.scss | 2 +- .../base/icons/icons/f5/property-24.scss | 2 +- .../icons/icons/facebook-color/index.scss | 2 +- .../icons/icons/facebook-color/keyframes.scss | 2 +- .../icons/facebook-color/placeholders.scss | 2 +- .../icons/facebook-color/property-16.scss | 2 +- 
.../icons/facebook-color/property-24.scss | 2 +- .../base/icons/icons/facebook/index.scss | 2 +- .../base/icons/icons/facebook/keyframes.scss | 2 +- .../icons/icons/facebook/placeholders.scss | 2 +- .../icons/icons/facebook/property-16.scss | 2 +- .../icons/icons/facebook/property-24.scss | 2 +- .../base/icons/icons/fast-forward/index.scss | 2 +- .../icons/icons/fast-forward/keyframes.scss | 2 +- .../icons/fast-forward/placeholders.scss | 2 +- .../icons/icons/fast-forward/property-16.scss | 2 +- .../icons/icons/fast-forward/property-24.scss | 2 +- .../base/icons/icons/file-change/index.scss | 2 +- .../icons/icons/file-change/keyframes.scss | 2 +- .../icons/icons/file-change/placeholders.scss | 2 +- .../icons/icons/file-change/property-16.scss | 2 +- .../icons/icons/file-change/property-24.scss | 2 +- .../base/icons/icons/file-check/index.scss | 2 +- .../icons/icons/file-check/keyframes.scss | 2 +- .../icons/icons/file-check/placeholders.scss | 2 +- .../icons/icons/file-check/property-16.scss | 2 +- .../icons/icons/file-check/property-24.scss | 2 +- .../base/icons/icons/file-diff/index.scss | 2 +- .../base/icons/icons/file-diff/keyframes.scss | 2 +- .../icons/icons/file-diff/placeholders.scss | 2 +- .../icons/icons/file-diff/property-16.scss | 2 +- .../icons/icons/file-diff/property-24.scss | 2 +- .../base/icons/icons/file-fill/index.scss | 2 +- .../base/icons/icons/file-fill/keyframes.scss | 2 +- .../icons/icons/file-fill/placeholders.scss | 2 +- .../base/icons/icons/file-minus/index.scss | 2 +- .../icons/icons/file-minus/keyframes.scss | 2 +- .../icons/icons/file-minus/placeholders.scss | 2 +- .../icons/icons/file-minus/property-16.scss | 2 +- .../icons/icons/file-minus/property-24.scss | 2 +- .../base/icons/icons/file-outline/index.scss | 2 +- .../icons/icons/file-outline/keyframes.scss | 2 +- .../icons/file-outline/placeholders.scss | 2 +- .../base/icons/icons/file-plus/index.scss | 2 +- .../base/icons/icons/file-plus/keyframes.scss | 2 +- .../icons/icons/file-plus/placeholders.scss | 2 +- .../icons/icons/file-plus/property-16.scss | 2 +- .../icons/icons/file-plus/property-24.scss | 2 +- .../base/icons/icons/file-source/index.scss | 2 +- .../icons/icons/file-source/keyframes.scss | 2 +- .../icons/icons/file-source/placeholders.scss | 2 +- .../icons/icons/file-source/property-16.scss | 2 +- .../icons/icons/file-source/property-24.scss | 2 +- .../base/icons/icons/file-text/index.scss | 2 +- .../base/icons/icons/file-text/keyframes.scss | 2 +- .../icons/icons/file-text/placeholders.scss | 2 +- .../icons/icons/file-text/property-16.scss | 2 +- .../icons/icons/file-text/property-24.scss | 2 +- .../styles/base/icons/icons/file-x/index.scss | 2 +- .../base/icons/icons/file-x/keyframes.scss | 2 +- .../base/icons/icons/file-x/placeholders.scss | 2 +- .../base/icons/icons/file-x/property-16.scss | 2 +- .../base/icons/icons/file-x/property-24.scss | 2 +- .../styles/base/icons/icons/file/index.scss | 2 +- .../base/icons/icons/file/keyframes.scss | 2 +- .../base/icons/icons/file/placeholders.scss | 2 +- .../base/icons/icons/file/property-16.scss | 2 +- .../base/icons/icons/file/property-24.scss | 2 +- .../styles/base/icons/icons/files/index.scss | 2 +- .../base/icons/icons/files/keyframes.scss | 2 +- .../base/icons/icons/files/placeholders.scss | 2 +- .../base/icons/icons/files/property-16.scss | 2 +- .../base/icons/icons/files/property-24.scss | 2 +- .../styles/base/icons/icons/film/index.scss | 2 +- .../base/icons/icons/film/keyframes.scss | 2 +- .../base/icons/icons/film/placeholders.scss | 2 +- 
.../base/icons/icons/film/property-16.scss | 2 +- .../base/icons/icons/film/property-24.scss | 2 +- .../base/icons/icons/filter-circle/index.scss | 2 +- .../icons/icons/filter-circle/keyframes.scss | 2 +- .../icons/filter-circle/placeholders.scss | 2 +- .../icons/filter-circle/property-16.scss | 2 +- .../icons/filter-circle/property-24.scss | 2 +- .../base/icons/icons/filter-fill/index.scss | 2 +- .../icons/icons/filter-fill/keyframes.scss | 2 +- .../icons/icons/filter-fill/placeholders.scss | 2 +- .../icons/icons/filter-fill/property-16.scss | 2 +- .../icons/icons/filter-fill/property-24.scss | 2 +- .../styles/base/icons/icons/filter/index.scss | 2 +- .../base/icons/icons/filter/keyframes.scss | 2 +- .../base/icons/icons/filter/placeholders.scss | 2 +- .../base/icons/icons/filter/property-16.scss | 2 +- .../base/icons/icons/filter/property-24.scss | 2 +- .../base/icons/icons/fingerprint/index.scss | 2 +- .../icons/icons/fingerprint/keyframes.scss | 2 +- .../icons/icons/fingerprint/placeholders.scss | 2 +- .../icons/icons/fingerprint/property-16.scss | 2 +- .../icons/icons/fingerprint/property-24.scss | 2 +- .../styles/base/icons/icons/flag/index.scss | 2 +- .../base/icons/icons/flag/keyframes.scss | 2 +- .../base/icons/icons/flag/placeholders.scss | 2 +- .../base/icons/icons/flag/property-16.scss | 2 +- .../base/icons/icons/flag/property-24.scss | 2 +- .../base/icons/icons/folder-fill/index.scss | 2 +- .../icons/icons/folder-fill/keyframes.scss | 2 +- .../icons/icons/folder-fill/placeholders.scss | 2 +- .../icons/icons/folder-fill/property-16.scss | 2 +- .../icons/icons/folder-fill/property-24.scss | 2 +- .../icons/icons/folder-minus-fill/index.scss | 2 +- .../icons/folder-minus-fill/keyframes.scss | 2 +- .../icons/folder-minus-fill/placeholders.scss | 2 +- .../icons/folder-minus-fill/property-16.scss | 2 +- .../icons/folder-minus-fill/property-24.scss | 2 +- .../base/icons/icons/folder-minus/index.scss | 2 +- .../icons/icons/folder-minus/keyframes.scss | 2 +- .../icons/folder-minus/placeholders.scss | 2 +- .../icons/icons/folder-minus/property-16.scss | 2 +- .../icons/icons/folder-minus/property-24.scss | 2 +- .../icons/icons/folder-outline/index.scss | 2 +- .../icons/icons/folder-outline/keyframes.scss | 2 +- .../icons/folder-outline/placeholders.scss | 2 +- .../icons/icons/folder-plus-fill/index.scss | 2 +- .../icons/folder-plus-fill/keyframes.scss | 2 +- .../icons/folder-plus-fill/placeholders.scss | 2 +- .../icons/folder-plus-fill/property-16.scss | 2 +- .../icons/folder-plus-fill/property-24.scss | 2 +- .../base/icons/icons/folder-plus/index.scss | 2 +- .../icons/icons/folder-plus/keyframes.scss | 2 +- .../icons/icons/folder-plus/placeholders.scss | 2 +- .../icons/icons/folder-plus/property-16.scss | 2 +- .../icons/icons/folder-plus/property-24.scss | 2 +- .../base/icons/icons/folder-star/index.scss | 2 +- .../icons/icons/folder-star/keyframes.scss | 2 +- .../icons/icons/folder-star/placeholders.scss | 2 +- .../icons/icons/folder-star/property-16.scss | 2 +- .../icons/icons/folder-star/property-24.scss | 2 +- .../base/icons/icons/folder-users/index.scss | 2 +- .../icons/icons/folder-users/keyframes.scss | 2 +- .../icons/folder-users/placeholders.scss | 2 +- .../icons/icons/folder-users/property-16.scss | 2 +- .../icons/icons/folder-users/property-24.scss | 2 +- .../styles/base/icons/icons/folder/index.scss | 2 +- .../base/icons/icons/folder/keyframes.scss | 2 +- .../base/icons/icons/folder/placeholders.scss | 2 +- .../base/icons/icons/folder/property-16.scss | 2 +- 
.../base/icons/icons/folder/property-24.scss | 2 +- .../styles/base/icons/icons/frown/index.scss | 2 +- .../base/icons/icons/frown/keyframes.scss | 2 +- .../base/icons/icons/frown/placeholders.scss | 2 +- .../base/icons/icons/frown/property-16.scss | 2 +- .../base/icons/icons/frown/property-24.scss | 2 +- .../base/icons/icons/gateway/index.scss | 2 +- .../base/icons/icons/gateway/keyframes.scss | 2 +- .../icons/icons/gateway/placeholders.scss | 2 +- .../base/icons/icons/gateway/property-16.scss | 2 +- .../base/icons/icons/gateway/property-24.scss | 2 +- .../base/icons/icons/gcp-color/index.scss | 2 +- .../base/icons/icons/gcp-color/keyframes.scss | 2 +- .../icons/icons/gcp-color/placeholders.scss | 2 +- .../icons/icons/gcp-color/property-16.scss | 2 +- .../icons/icons/gcp-color/property-24.scss | 2 +- .../styles/base/icons/icons/gcp/index.scss | 2 +- .../base/icons/icons/gcp/keyframes.scss | 2 +- .../base/icons/icons/gcp/placeholders.scss | 2 +- .../base/icons/icons/gcp/property-16.scss | 2 +- .../base/icons/icons/gcp/property-24.scss | 2 +- .../base/icons/icons/gift-fill/index.scss | 2 +- .../base/icons/icons/gift-fill/keyframes.scss | 2 +- .../icons/icons/gift-fill/placeholders.scss | 2 +- .../base/icons/icons/gift-outline/index.scss | 2 +- .../icons/icons/gift-outline/keyframes.scss | 2 +- .../icons/gift-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/gift/index.scss | 2 +- .../base/icons/icons/gift/keyframes.scss | 2 +- .../base/icons/icons/gift/placeholders.scss | 2 +- .../base/icons/icons/gift/property-16.scss | 2 +- .../base/icons/icons/gift/property-24.scss | 2 +- .../base/icons/icons/git-branch/index.scss | 2 +- .../icons/icons/git-branch/keyframes.scss | 2 +- .../icons/icons/git-branch/placeholders.scss | 2 +- .../icons/icons/git-branch/property-16.scss | 2 +- .../icons/icons/git-branch/property-24.scss | 2 +- .../base/icons/icons/git-commit/index.scss | 2 +- .../icons/icons/git-commit/keyframes.scss | 2 +- .../icons/icons/git-commit/placeholders.scss | 2 +- .../icons/icons/git-commit/property-16.scss | 2 +- .../icons/icons/git-commit/property-24.scss | 2 +- .../base/icons/icons/git-merge/index.scss | 2 +- .../base/icons/icons/git-merge/keyframes.scss | 2 +- .../icons/icons/git-merge/placeholders.scss | 2 +- .../icons/icons/git-merge/property-16.scss | 2 +- .../icons/icons/git-merge/property-24.scss | 2 +- .../icons/icons/git-pull-request/index.scss | 2 +- .../icons/git-pull-request/keyframes.scss | 2 +- .../icons/git-pull-request/placeholders.scss | 2 +- .../icons/git-pull-request/property-16.scss | 2 +- .../icons/git-pull-request/property-24.scss | 2 +- .../base/icons/icons/git-repo/index.scss | 2 +- .../base/icons/icons/git-repo/keyframes.scss | 2 +- .../icons/icons/git-repo/placeholders.scss | 2 +- .../icons/icons/git-repo/property-16.scss | 2 +- .../icons/icons/git-repo/property-24.scss | 2 +- .../icons/icons/git-repository/index.scss | 2 +- .../icons/icons/git-repository/keyframes.scss | 2 +- .../icons/git-repository/placeholders.scss | 2 +- .../base/icons/icons/github-color/index.scss | 2 +- .../icons/icons/github-color/keyframes.scss | 2 +- .../icons/github-color/placeholders.scss | 2 +- .../icons/icons/github-color/property-16.scss | 2 +- .../icons/icons/github-color/property-24.scss | 2 +- .../styles/base/icons/icons/github/index.scss | 2 +- .../base/icons/icons/github/keyframes.scss | 2 +- .../base/icons/icons/github/placeholders.scss | 2 +- .../base/icons/icons/github/property-16.scss | 2 +- .../base/icons/icons/github/property-24.scss | 2 +- 
.../base/icons/icons/gitlab-color/index.scss | 2 +- .../icons/icons/gitlab-color/keyframes.scss | 2 +- .../icons/gitlab-color/placeholders.scss | 2 +- .../icons/icons/gitlab-color/property-16.scss | 2 +- .../icons/icons/gitlab-color/property-24.scss | 2 +- .../styles/base/icons/icons/gitlab/index.scss | 2 +- .../base/icons/icons/gitlab/keyframes.scss | 2 +- .../base/icons/icons/gitlab/placeholders.scss | 2 +- .../base/icons/icons/gitlab/property-16.scss | 2 +- .../base/icons/icons/gitlab/property-24.scss | 2 +- .../base/icons/icons/globe-private/index.scss | 2 +- .../icons/icons/globe-private/keyframes.scss | 2 +- .../icons/globe-private/placeholders.scss | 2 +- .../icons/globe-private/property-16.scss | 2 +- .../icons/globe-private/property-24.scss | 2 +- .../styles/base/icons/icons/globe/index.scss | 2 +- .../base/icons/icons/globe/keyframes.scss | 2 +- .../base/icons/icons/globe/placeholders.scss | 2 +- .../base/icons/icons/globe/property-16.scss | 2 +- .../base/icons/icons/globe/property-24.scss | 2 +- .../base/icons/icons/google-color/index.scss | 2 +- .../icons/icons/google-color/keyframes.scss | 2 +- .../icons/google-color/placeholders.scss | 2 +- .../icons/icons/google-color/property-16.scss | 2 +- .../icons/icons/google-color/property-24.scss | 2 +- .../styles/base/icons/icons/google/index.scss | 2 +- .../base/icons/icons/google/keyframes.scss | 2 +- .../base/icons/icons/google/placeholders.scss | 2 +- .../base/icons/icons/google/property-16.scss | 2 +- .../base/icons/icons/google/property-24.scss | 2 +- .../base/icons/icons/grid-alt/index.scss | 2 +- .../base/icons/icons/grid-alt/keyframes.scss | 2 +- .../icons/icons/grid-alt/placeholders.scss | 2 +- .../icons/icons/grid-alt/property-16.scss | 2 +- .../icons/icons/grid-alt/property-24.scss | 2 +- .../styles/base/icons/icons/grid/index.scss | 2 +- .../base/icons/icons/grid/keyframes.scss | 2 +- .../base/icons/icons/grid/placeholders.scss | 2 +- .../base/icons/icons/grid/property-16.scss | 2 +- .../base/icons/icons/grid/property-24.scss | 2 +- .../base/icons/icons/guide-link/index.scss | 2 +- .../icons/icons/guide-link/keyframes.scss | 2 +- .../icons/icons/guide-link/placeholders.scss | 2 +- .../icons/icons/guide-link/property-16.scss | 2 +- .../icons/icons/guide-link/property-24.scss | 2 +- .../styles/base/icons/icons/guide/index.scss | 2 +- .../base/icons/icons/guide/keyframes.scss | 2 +- .../base/icons/icons/guide/placeholders.scss | 2 +- .../base/icons/icons/guide/property-16.scss | 2 +- .../base/icons/icons/guide/property-24.scss | 2 +- .../styles/base/icons/icons/hammer/index.scss | 2 +- .../base/icons/icons/hammer/keyframes.scss | 2 +- .../base/icons/icons/hammer/placeholders.scss | 2 +- .../base/icons/icons/hammer/property-16.scss | 2 +- .../base/icons/icons/hammer/property-24.scss | 2 +- .../base/icons/icons/handshake/index.scss | 2 +- .../base/icons/icons/handshake/keyframes.scss | 2 +- .../icons/icons/handshake/placeholders.scss | 2 +- .../icons/icons/handshake/property-16.scss | 2 +- .../icons/icons/handshake/property-24.scss | 2 +- .../base/icons/icons/hard-drive/index.scss | 2 +- .../icons/icons/hard-drive/keyframes.scss | 2 +- .../icons/icons/hard-drive/placeholders.scss | 2 +- .../icons/icons/hard-drive/property-16.scss | 2 +- .../icons/icons/hard-drive/property-24.scss | 2 +- .../styles/base/icons/icons/hash/index.scss | 2 +- .../base/icons/icons/hash/keyframes.scss | 2 +- .../base/icons/icons/hash/placeholders.scss | 2 +- .../base/icons/icons/hash/property-16.scss | 2 +- .../base/icons/icons/hash/property-24.scss 
| 2 +- .../icons/icons/hashicorp-color/index.scss | 2 +- .../icons/hashicorp-color/keyframes.scss | 2 +- .../icons/hashicorp-color/placeholders.scss | 2 +- .../icons/hashicorp-color/property-16.scss | 2 +- .../icons/hashicorp-color/property-24.scss | 2 +- .../base/icons/icons/hashicorp/index.scss | 2 +- .../base/icons/icons/hashicorp/keyframes.scss | 2 +- .../icons/icons/hashicorp/placeholders.scss | 2 +- .../icons/icons/hashicorp/property-16.scss | 2 +- .../icons/icons/hashicorp/property-24.scss | 2 +- .../base/icons/icons/hcp-color/index.scss | 2 +- .../base/icons/icons/hcp-color/keyframes.scss | 2 +- .../icons/icons/hcp-color/placeholders.scss | 2 +- .../icons/icons/hcp-color/property-16.scss | 2 +- .../icons/icons/hcp-color/property-24.scss | 2 +- .../styles/base/icons/icons/hcp/index.scss | 2 +- .../base/icons/icons/hcp/keyframes.scss | 2 +- .../base/icons/icons/hcp/placeholders.scss | 2 +- .../base/icons/icons/hcp/property-16.scss | 2 +- .../base/icons/icons/hcp/property-24.scss | 2 +- .../base/icons/icons/headphones/index.scss | 2 +- .../icons/icons/headphones/keyframes.scss | 2 +- .../icons/icons/headphones/placeholders.scss | 2 +- .../icons/icons/headphones/property-16.scss | 2 +- .../icons/icons/headphones/property-24.scss | 2 +- .../styles/base/icons/icons/health/index.scss | 2 +- .../base/icons/icons/health/keyframes.scss | 2 +- .../base/icons/icons/health/placeholders.scss | 2 +- .../base/icons/icons/heart-fill/index.scss | 2 +- .../icons/icons/heart-fill/keyframes.scss | 2 +- .../icons/icons/heart-fill/placeholders.scss | 2 +- .../icons/icons/heart-fill/property-16.scss | 2 +- .../icons/icons/heart-fill/property-24.scss | 2 +- .../base/icons/icons/heart-off/index.scss | 2 +- .../base/icons/icons/heart-off/keyframes.scss | 2 +- .../icons/icons/heart-off/placeholders.scss | 2 +- .../icons/icons/heart-off/property-16.scss | 2 +- .../icons/icons/heart-off/property-24.scss | 2 +- .../styles/base/icons/icons/heart/index.scss | 2 +- .../base/icons/icons/heart/keyframes.scss | 2 +- .../base/icons/icons/heart/placeholders.scss | 2 +- .../base/icons/icons/heart/property-16.scss | 2 +- .../base/icons/icons/heart/property-24.scss | 2 +- .../icons/icons/help-circle-fill/index.scss | 2 +- .../icons/help-circle-fill/keyframes.scss | 2 +- .../icons/help-circle-fill/placeholders.scss | 2 +- .../icons/help-circle-outline/index.scss | 2 +- .../icons/help-circle-outline/keyframes.scss | 2 +- .../help-circle-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/help/index.scss | 2 +- .../base/icons/icons/help/keyframes.scss | 2 +- .../base/icons/icons/help/placeholders.scss | 2 +- .../base/icons/icons/help/property-16.scss | 2 +- .../base/icons/icons/help/property-24.scss | 2 +- .../base/icons/icons/hexagon-fill/index.scss | 2 +- .../icons/icons/hexagon-fill/keyframes.scss | 2 +- .../icons/hexagon-fill/placeholders.scss | 2 +- .../icons/icons/hexagon-fill/property-16.scss | 2 +- .../icons/icons/hexagon-fill/property-24.scss | 2 +- .../base/icons/icons/hexagon/index.scss | 2 +- .../base/icons/icons/hexagon/keyframes.scss | 2 +- .../icons/icons/hexagon/placeholders.scss | 2 +- .../base/icons/icons/hexagon/property-16.scss | 2 +- .../base/icons/icons/hexagon/property-24.scss | 2 +- .../base/icons/icons/history/index.scss | 2 +- .../base/icons/icons/history/keyframes.scss | 2 +- .../icons/icons/history/placeholders.scss | 2 +- .../base/icons/icons/history/property-16.scss | 2 +- .../base/icons/icons/history/property-24.scss | 2 +- .../styles/base/icons/icons/home/index.scss | 2 +- 
.../base/icons/icons/home/keyframes.scss | 2 +- .../base/icons/icons/home/placeholders.scss | 2 +- .../base/icons/icons/home/property-16.scss | 2 +- .../base/icons/icons/home/property-24.scss | 2 +- .../base/icons/icons/hourglass/index.scss | 2 +- .../base/icons/icons/hourglass/keyframes.scss | 2 +- .../icons/icons/hourglass/placeholders.scss | 2 +- .../icons/icons/hourglass/property-16.scss | 2 +- .../icons/icons/hourglass/property-24.scss | 2 +- .../icons/icons/identity-service/index.scss | 2 +- .../icons/identity-service/keyframes.scss | 2 +- .../icons/identity-service/placeholders.scss | 2 +- .../icons/identity-service/property-16.scss | 2 +- .../icons/identity-service/property-24.scss | 2 +- .../base/icons/icons/identity-user/index.scss | 2 +- .../icons/icons/identity-user/keyframes.scss | 2 +- .../icons/identity-user/placeholders.scss | 2 +- .../icons/identity-user/property-16.scss | 2 +- .../icons/identity-user/property-24.scss | 2 +- .../styles/base/icons/icons/image/index.scss | 2 +- .../base/icons/icons/image/keyframes.scss | 2 +- .../base/icons/icons/image/placeholders.scss | 2 +- .../base/icons/icons/image/property-16.scss | 2 +- .../base/icons/icons/image/property-24.scss | 2 +- .../styles/base/icons/icons/inbox/index.scss | 2 +- .../base/icons/icons/inbox/keyframes.scss | 2 +- .../base/icons/icons/inbox/placeholders.scss | 2 +- .../base/icons/icons/inbox/property-16.scss | 2 +- .../base/icons/icons/inbox/property-24.scss | 2 +- .../app/styles/base/icons/icons/index.scss | 2 +- .../icons/icons/info-circle-fill/index.scss | 2 +- .../icons/info-circle-fill/keyframes.scss | 2 +- .../icons/info-circle-fill/placeholders.scss | 2 +- .../icons/info-circle-outline/index.scss | 2 +- .../icons/info-circle-outline/keyframes.scss | 2 +- .../info-circle-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/info/index.scss | 2 +- .../base/icons/icons/info/keyframes.scss | 2 +- .../base/icons/icons/info/placeholders.scss | 2 +- .../base/icons/icons/info/property-16.scss | 2 +- .../base/icons/icons/info/property-24.scss | 2 +- .../base/icons/icons/jump-link/index.scss | 2 +- .../base/icons/icons/jump-link/keyframes.scss | 2 +- .../icons/icons/jump-link/placeholders.scss | 2 +- .../icons/icons/jump-link/property-16.scss | 2 +- .../icons/icons/jump-link/property-24.scss | 2 +- .../base/icons/icons/key-values/index.scss | 2 +- .../icons/icons/key-values/keyframes.scss | 2 +- .../icons/icons/key-values/placeholders.scss | 2 +- .../icons/icons/key-values/property-16.scss | 2 +- .../icons/icons/key-values/property-24.scss | 2 +- .../styles/base/icons/icons/key/index.scss | 2 +- .../base/icons/icons/key/keyframes.scss | 2 +- .../base/icons/icons/key/placeholders.scss | 2 +- .../base/icons/icons/key/property-16.scss | 2 +- .../base/icons/icons/key/property-24.scss | 2 +- .../base/icons/icons/keychain/index.scss | 2 +- .../base/icons/icons/keychain/keyframes.scss | 2 +- .../icons/icons/keychain/placeholders.scss | 2 +- .../icons/icons/keychain/property-16.scss | 2 +- .../icons/icons/keychain/property-24.scss | 2 +- .../icons/icons/kubernetes-color/index.scss | 2 +- .../icons/kubernetes-color/keyframes.scss | 2 +- .../icons/kubernetes-color/placeholders.scss | 2 +- .../icons/kubernetes-color/property-16.scss | 2 +- .../icons/kubernetes-color/property-24.scss | 2 +- .../base/icons/icons/kubernetes/index.scss | 2 +- .../icons/icons/kubernetes/keyframes.scss | 2 +- .../icons/icons/kubernetes/placeholders.scss | 2 +- .../icons/icons/kubernetes/property-16.scss | 2 +- 
.../icons/icons/kubernetes/property-24.scss | 2 +- .../base/icons/icons/labyrinth/index.scss | 2 +- .../base/icons/icons/labyrinth/keyframes.scss | 2 +- .../icons/icons/labyrinth/placeholders.scss | 2 +- .../icons/icons/labyrinth/property-16.scss | 2 +- .../icons/icons/labyrinth/property-24.scss | 2 +- .../styles/base/icons/icons/layers/index.scss | 2 +- .../base/icons/icons/layers/keyframes.scss | 2 +- .../base/icons/icons/layers/placeholders.scss | 2 +- .../base/icons/icons/layers/property-16.scss | 2 +- .../base/icons/icons/layers/property-24.scss | 2 +- .../styles/base/icons/icons/layout/index.scss | 2 +- .../base/icons/icons/layout/keyframes.scss | 2 +- .../base/icons/icons/layout/placeholders.scss | 2 +- .../base/icons/icons/layout/property-16.scss | 2 +- .../base/icons/icons/layout/property-24.scss | 2 +- .../base/icons/icons/learn-link/index.scss | 2 +- .../icons/icons/learn-link/keyframes.scss | 2 +- .../icons/icons/learn-link/placeholders.scss | 2 +- .../icons/icons/learn-link/property-16.scss | 2 +- .../icons/icons/learn-link/property-24.scss | 2 +- .../styles/base/icons/icons/learn/index.scss | 2 +- .../base/icons/icons/learn/keyframes.scss | 2 +- .../base/icons/icons/learn/placeholders.scss | 2 +- .../base/icons/icons/learn/property-16.scss | 2 +- .../base/icons/icons/learn/property-24.scss | 2 +- .../base/icons/icons/line-chart-up/index.scss | 2 +- .../icons/icons/line-chart-up/keyframes.scss | 2 +- .../icons/line-chart-up/placeholders.scss | 2 +- .../icons/line-chart-up/property-16.scss | 2 +- .../icons/line-chart-up/property-24.scss | 2 +- .../base/icons/icons/line-chart/index.scss | 2 +- .../icons/icons/line-chart/keyframes.scss | 2 +- .../icons/icons/line-chart/placeholders.scss | 2 +- .../icons/icons/line-chart/property-16.scss | 2 +- .../icons/icons/line-chart/property-24.scss | 2 +- .../styles/base/icons/icons/link/index.scss | 2 +- .../base/icons/icons/link/keyframes.scss | 2 +- .../base/icons/icons/link/placeholders.scss | 2 +- .../base/icons/icons/link/property-16.scss | 2 +- .../base/icons/icons/link/property-24.scss | 2 +- .../icons/icons/linkedin-color/index.scss | 2 +- .../icons/icons/linkedin-color/keyframes.scss | 2 +- .../icons/linkedin-color/placeholders.scss | 2 +- .../icons/linkedin-color/property-16.scss | 2 +- .../icons/linkedin-color/property-24.scss | 2 +- .../base/icons/icons/linkedin/index.scss | 2 +- .../base/icons/icons/linkedin/keyframes.scss | 2 +- .../icons/icons/linkedin/placeholders.scss | 2 +- .../icons/icons/linkedin/property-16.scss | 2 +- .../icons/icons/linkedin/property-24.scss | 2 +- .../styles/base/icons/icons/list/index.scss | 2 +- .../base/icons/icons/list/keyframes.scss | 2 +- .../base/icons/icons/list/placeholders.scss | 2 +- .../base/icons/icons/list/property-16.scss | 2 +- .../base/icons/icons/list/property-24.scss | 2 +- .../base/icons/icons/load-balancer/index.scss | 2 +- .../icons/icons/load-balancer/keyframes.scss | 2 +- .../icons/load-balancer/placeholders.scss | 2 +- .../icons/load-balancer/property-16.scss | 2 +- .../icons/load-balancer/property-24.scss | 2 +- .../icons/icons/loading-motion/index.scss | 2 +- .../icons/icons/loading-motion/keyframes.scss | 2 +- .../icons/loading-motion/placeholders.scss | 2 +- .../icons/loading-motion/property-16.scss | 2 +- .../icons/loading-motion/property-24.scss | 2 +- .../base/icons/icons/loading/index.scss | 2 +- .../base/icons/icons/loading/keyframes.scss | 2 +- .../icons/icons/loading/placeholders.scss | 2 +- .../base/icons/icons/loading/property-16.scss | 2 +- 
.../base/icons/icons/loading/property-24.scss | 2 +- .../icons/icons/lock-closed-fill/index.scss | 2 +- .../icons/lock-closed-fill/keyframes.scss | 2 +- .../icons/lock-closed-fill/placeholders.scss | 2 +- .../icons/lock-closed-outline/index.scss | 2 +- .../icons/lock-closed-outline/keyframes.scss | 2 +- .../lock-closed-outline/placeholders.scss | 2 +- .../base/icons/icons/lock-closed/index.scss | 2 +- .../icons/icons/lock-closed/keyframes.scss | 2 +- .../icons/icons/lock-closed/placeholders.scss | 2 +- .../base/icons/icons/lock-disabled/index.scss | 2 +- .../icons/icons/lock-disabled/keyframes.scss | 2 +- .../icons/lock-disabled/placeholders.scss | 2 +- .../base/icons/icons/lock-fill/index.scss | 2 +- .../base/icons/icons/lock-fill/keyframes.scss | 2 +- .../icons/icons/lock-fill/placeholders.scss | 2 +- .../icons/icons/lock-fill/property-16.scss | 2 +- .../icons/icons/lock-fill/property-24.scss | 2 +- .../base/icons/icons/lock-off/index.scss | 2 +- .../base/icons/icons/lock-off/keyframes.scss | 2 +- .../icons/icons/lock-off/placeholders.scss | 2 +- .../icons/icons/lock-off/property-16.scss | 2 +- .../icons/icons/lock-off/property-24.scss | 2 +- .../base/icons/icons/lock-open/index.scss | 2 +- .../base/icons/icons/lock-open/keyframes.scss | 2 +- .../icons/icons/lock-open/placeholders.scss | 2 +- .../styles/base/icons/icons/lock/index.scss | 2 +- .../base/icons/icons/lock/keyframes.scss | 2 +- .../base/icons/icons/lock/placeholders.scss | 2 +- .../base/icons/icons/lock/property-16.scss | 2 +- .../base/icons/icons/lock/property-24.scss | 2 +- .../icons/logo-alicloud-color/index.scss | 2 +- .../icons/logo-alicloud-color/keyframes.scss | 2 +- .../logo-alicloud-color/placeholders.scss | 2 +- .../icons/logo-alicloud-monochrome/index.scss | 2 +- .../logo-alicloud-monochrome/keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../icons/icons/logo-auth0-color/index.scss | 2 +- .../icons/logo-auth0-color/keyframes.scss | 2 +- .../icons/logo-auth0-color/placeholders.scss | 2 +- .../icons/icons/logo-aws-color/index.scss | 2 +- .../icons/icons/logo-aws-color/keyframes.scss | 2 +- .../icons/logo-aws-color/placeholders.scss | 2 +- .../icons/logo-aws-monochrome/index.scss | 2 +- .../icons/logo-aws-monochrome/keyframes.scss | 2 +- .../logo-aws-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-azure-color/index.scss | 2 +- .../icons/logo-azure-color/keyframes.scss | 2 +- .../icons/logo-azure-color/placeholders.scss | 2 +- .../icons/logo-azure-dev-ops-color/index.scss | 2 +- .../logo-azure-dev-ops-color/keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../logo-azure-dev-ops-monochrome/index.scss | 2 +- .../keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../icons/logo-azure-monochrome/index.scss | 2 +- .../logo-azure-monochrome/keyframes.scss | 2 +- .../logo-azure-monochrome/placeholders.scss | 2 +- .../icons/logo-bitbucket-color/index.scss | 2 +- .../icons/logo-bitbucket-color/keyframes.scss | 2 +- .../logo-bitbucket-color/placeholders.scss | 2 +- .../logo-bitbucket-monochrome/index.scss | 2 +- .../logo-bitbucket-monochrome/keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../icons/logo-ember-circle-color/index.scss | 2 +- .../logo-ember-circle-color/keyframes.scss | 2 +- .../logo-ember-circle-color/placeholders.scss | 2 +- .../logo-ember-circle-color/property-16.scss | 2 +- .../logo-ember-circle-color/property-24.scss | 2 +- .../icons/icons/logo-gcp-color/index.scss | 2 +- .../icons/icons/logo-gcp-color/keyframes.scss | 2 +- .../icons/logo-gcp-color/placeholders.scss | 2 +- 
.../icons/logo-gcp-monochrome/index.scss | 2 +- .../icons/logo-gcp-monochrome/keyframes.scss | 2 +- .../logo-gcp-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-github-color/index.scss | 2 +- .../icons/logo-github-color/keyframes.scss | 2 +- .../icons/logo-github-color/placeholders.scss | 2 +- .../icons/logo-github-monochrome/index.scss | 2 +- .../logo-github-monochrome/keyframes.scss | 2 +- .../logo-github-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-gitlab-color/index.scss | 2 +- .../icons/logo-gitlab-color/keyframes.scss | 2 +- .../icons/logo-gitlab-color/placeholders.scss | 2 +- .../icons/logo-gitlab-monochrome/index.scss | 2 +- .../logo-gitlab-monochrome/keyframes.scss | 2 +- .../logo-gitlab-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-glimmer-color/index.scss | 2 +- .../icons/logo-glimmer-color/keyframes.scss | 2 +- .../logo-glimmer-color/placeholders.scss | 2 +- .../icons/logo-glimmer-color/property-16.scss | 2 +- .../icons/logo-glimmer-color/property-24.scss | 2 +- .../icons/icons/logo-google-color/index.scss | 2 +- .../icons/logo-google-color/keyframes.scss | 2 +- .../icons/logo-google-color/placeholders.scss | 2 +- .../icons/logo-google-monochrome/index.scss | 2 +- .../logo-google-monochrome/keyframes.scss | 2 +- .../logo-google-monochrome/placeholders.scss | 2 +- .../icons/logo-hashicorp-color/index.scss | 2 +- .../icons/logo-hashicorp-color/keyframes.scss | 2 +- .../logo-hashicorp-color/placeholders.scss | 2 +- .../logo-hashicorp-color/property-16.scss | 2 +- .../logo-hashicorp-color/property-24.scss | 2 +- .../icons/icons/logo-jwt-color/index.scss | 2 +- .../icons/icons/logo-jwt-color/keyframes.scss | 2 +- .../icons/logo-jwt-color/placeholders.scss | 2 +- .../icons/logo-jwt-color/property-16.scss | 2 +- .../icons/logo-jwt-color/property-24.scss | 2 +- .../icons/logo-kubernetes-color/index.scss | 2 +- .../logo-kubernetes-color/keyframes.scss | 2 +- .../logo-kubernetes-color/placeholders.scss | 2 +- .../logo-kubernetes-monochrome/index.scss | 2 +- .../logo-kubernetes-monochrome/keyframes.scss | 2 +- .../placeholders.scss | 2 +- .../icons/logo-microsoft-color/index.scss | 2 +- .../icons/logo-microsoft-color/keyframes.scss | 2 +- .../logo-microsoft-color/placeholders.scss | 2 +- .../icons/icons/logo-oidc-color/index.scss | 2 +- .../icons/logo-oidc-color/keyframes.scss | 2 +- .../icons/logo-oidc-color/placeholders.scss | 2 +- .../icons/logo-oidc-color/property-16.scss | 2 +- .../icons/logo-oidc-color/property-24.scss | 2 +- .../icons/icons/logo-okta-color/index.scss | 2 +- .../icons/logo-okta-color/keyframes.scss | 2 +- .../icons/logo-okta-color/placeholders.scss | 2 +- .../icons/icons/logo-oracle-color/index.scss | 2 +- .../icons/logo-oracle-color/keyframes.scss | 2 +- .../icons/logo-oracle-color/placeholders.scss | 2 +- .../icons/logo-oracle-monochrome/index.scss | 2 +- .../logo-oracle-monochrome/keyframes.scss | 2 +- .../logo-oracle-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-slack-color/index.scss | 2 +- .../icons/logo-slack-color/keyframes.scss | 2 +- .../icons/logo-slack-color/placeholders.scss | 2 +- .../icons/logo-slack-monochrome/index.scss | 2 +- .../logo-slack-monochrome/keyframes.scss | 2 +- .../logo-slack-monochrome/placeholders.scss | 2 +- .../icons/icons/logo-vmware-color/index.scss | 2 +- .../icons/logo-vmware-color/keyframes.scss | 2 +- .../icons/logo-vmware-color/placeholders.scss | 2 +- .../icons/logo-vmware-monochrome/index.scss | 2 +- .../logo-vmware-monochrome/keyframes.scss | 2 +- 
.../logo-vmware-monochrome/placeholders.scss | 2 +- .../base/icons/icons/mail-open/index.scss | 2 +- .../base/icons/icons/mail-open/keyframes.scss | 2 +- .../icons/icons/mail-open/placeholders.scss | 2 +- .../icons/icons/mail-open/property-16.scss | 2 +- .../icons/icons/mail-open/property-24.scss | 2 +- .../styles/base/icons/icons/mail/index.scss | 2 +- .../base/icons/icons/mail/keyframes.scss | 2 +- .../base/icons/icons/mail/placeholders.scss | 2 +- .../base/icons/icons/mail/property-16.scss | 2 +- .../base/icons/icons/mail/property-24.scss | 2 +- .../base/icons/icons/mainframe/index.scss | 2 +- .../base/icons/icons/mainframe/keyframes.scss | 2 +- .../icons/icons/mainframe/placeholders.scss | 2 +- .../icons/icons/mainframe/property-16.scss | 2 +- .../icons/icons/mainframe/property-24.scss | 2 +- .../base/icons/icons/map-pin/index.scss | 2 +- .../base/icons/icons/map-pin/keyframes.scss | 2 +- .../icons/icons/map-pin/placeholders.scss | 2 +- .../base/icons/icons/map-pin/property-16.scss | 2 +- .../base/icons/icons/map-pin/property-24.scss | 2 +- .../styles/base/icons/icons/map/index.scss | 2 +- .../base/icons/icons/map/keyframes.scss | 2 +- .../base/icons/icons/map/placeholders.scss | 2 +- .../base/icons/icons/map/property-16.scss | 2 +- .../base/icons/icons/map/property-24.scss | 2 +- .../base/icons/icons/maximize-alt/index.scss | 2 +- .../icons/icons/maximize-alt/keyframes.scss | 2 +- .../icons/maximize-alt/placeholders.scss | 2 +- .../icons/icons/maximize-alt/property-16.scss | 2 +- .../icons/icons/maximize-alt/property-24.scss | 2 +- .../base/icons/icons/maximize/index.scss | 2 +- .../base/icons/icons/maximize/keyframes.scss | 2 +- .../icons/icons/maximize/placeholders.scss | 2 +- .../icons/icons/maximize/property-16.scss | 2 +- .../icons/icons/maximize/property-24.scss | 2 +- .../styles/base/icons/icons/meh/index.scss | 2 +- .../base/icons/icons/meh/keyframes.scss | 2 +- .../base/icons/icons/meh/placeholders.scss | 2 +- .../base/icons/icons/meh/property-16.scss | 2 +- .../base/icons/icons/meh/property-24.scss | 2 +- .../styles/base/icons/icons/menu/index.scss | 2 +- .../base/icons/icons/menu/keyframes.scss | 2 +- .../base/icons/icons/menu/placeholders.scss | 2 +- .../base/icons/icons/menu/property-16.scss | 2 +- .../base/icons/icons/menu/property-24.scss | 2 +- .../styles/base/icons/icons/mesh/index.scss | 2 +- .../base/icons/icons/mesh/keyframes.scss | 2 +- .../base/icons/icons/mesh/placeholders.scss | 2 +- .../base/icons/icons/mesh/property-16.scss | 2 +- .../base/icons/icons/mesh/property-24.scss | 2 +- .../icons/message-circle-fill/index.scss | 2 +- .../icons/message-circle-fill/keyframes.scss | 2 +- .../message-circle-fill/placeholders.scss | 2 +- .../message-circle-fill/property-16.scss | 2 +- .../message-circle-fill/property-24.scss | 2 +- .../icons/icons/message-circle/index.scss | 2 +- .../icons/icons/message-circle/keyframes.scss | 2 +- .../icons/message-circle/placeholders.scss | 2 +- .../icons/message-circle/property-16.scss | 2 +- .../icons/message-circle/property-24.scss | 2 +- .../icons/message-square-fill/index.scss | 2 +- .../icons/message-square-fill/keyframes.scss | 2 +- .../message-square-fill/placeholders.scss | 2 +- .../message-square-fill/property-16.scss | 2 +- .../message-square-fill/property-24.scss | 2 +- .../icons/icons/message-square/index.scss | 2 +- .../icons/icons/message-square/keyframes.scss | 2 +- .../icons/message-square/placeholders.scss | 2 +- .../icons/message-square/property-16.scss | 2 +- .../icons/message-square/property-24.scss | 2 +- 
.../base/icons/icons/message/index.scss | 2 +- .../base/icons/icons/message/keyframes.scss | 2 +- .../icons/icons/message/placeholders.scss | 2 +- .../base/icons/icons/mic-off/index.scss | 2 +- .../base/icons/icons/mic-off/keyframes.scss | 2 +- .../icons/icons/mic-off/placeholders.scss | 2 +- .../base/icons/icons/mic-off/property-16.scss | 2 +- .../base/icons/icons/mic-off/property-24.scss | 2 +- .../styles/base/icons/icons/mic/index.scss | 2 +- .../base/icons/icons/mic/keyframes.scss | 2 +- .../base/icons/icons/mic/placeholders.scss | 2 +- .../base/icons/icons/mic/property-16.scss | 2 +- .../base/icons/icons/mic/property-24.scss | 2 +- .../icons/icons/microsoft-color/index.scss | 2 +- .../icons/microsoft-color/keyframes.scss | 2 +- .../icons/microsoft-color/placeholders.scss | 2 +- .../icons/microsoft-color/property-16.scss | 2 +- .../icons/microsoft-color/property-24.scss | 2 +- .../base/icons/icons/microsoft/index.scss | 2 +- .../base/icons/icons/microsoft/keyframes.scss | 2 +- .../icons/icons/microsoft/placeholders.scss | 2 +- .../icons/icons/microsoft/property-16.scss | 2 +- .../icons/icons/microsoft/property-24.scss | 2 +- .../base/icons/icons/migrate/index.scss | 2 +- .../base/icons/icons/migrate/keyframes.scss | 2 +- .../icons/icons/migrate/placeholders.scss | 2 +- .../base/icons/icons/migrate/property-16.scss | 2 +- .../base/icons/icons/migrate/property-24.scss | 2 +- .../base/icons/icons/minimize-alt/index.scss | 2 +- .../icons/icons/minimize-alt/keyframes.scss | 2 +- .../icons/minimize-alt/placeholders.scss | 2 +- .../icons/icons/minimize-alt/property-16.scss | 2 +- .../icons/icons/minimize-alt/property-24.scss | 2 +- .../base/icons/icons/minimize/index.scss | 2 +- .../base/icons/icons/minimize/keyframes.scss | 2 +- .../icons/icons/minimize/placeholders.scss | 2 +- .../icons/icons/minimize/property-16.scss | 2 +- .../icons/icons/minimize/property-24.scss | 2 +- .../icons/icons/minus-circle-fill/index.scss | 2 +- .../icons/minus-circle-fill/keyframes.scss | 2 +- .../icons/minus-circle-fill/placeholders.scss | 2 +- .../icons/minus-circle-outline/index.scss | 2 +- .../icons/minus-circle-outline/keyframes.scss | 2 +- .../minus-circle-outline/placeholders.scss | 2 +- .../base/icons/icons/minus-circle/index.scss | 2 +- .../icons/icons/minus-circle/keyframes.scss | 2 +- .../icons/minus-circle/placeholders.scss | 2 +- .../icons/icons/minus-circle/property-16.scss | 2 +- .../icons/icons/minus-circle/property-24.scss | 2 +- .../base/icons/icons/minus-plain/index.scss | 2 +- .../icons/icons/minus-plain/keyframes.scss | 2 +- .../icons/icons/minus-plain/placeholders.scss | 2 +- .../icons/icons/minus-plus-circle/index.scss | 2 +- .../icons/minus-plus-circle/keyframes.scss | 2 +- .../icons/minus-plus-circle/placeholders.scss | 2 +- .../icons/minus-plus-circle/property-16.scss | 2 +- .../icons/minus-plus-circle/property-24.scss | 2 +- .../icons/icons/minus-plus-square/index.scss | 2 +- .../icons/minus-plus-square/keyframes.scss | 2 +- .../icons/minus-plus-square/placeholders.scss | 2 +- .../icons/minus-plus-square/property-16.scss | 2 +- .../icons/minus-plus-square/property-24.scss | 2 +- .../base/icons/icons/minus-plus/index.scss | 2 +- .../icons/icons/minus-plus/keyframes.scss | 2 +- .../icons/icons/minus-plus/placeholders.scss | 2 +- .../icons/icons/minus-plus/property-16.scss | 2 +- .../icons/icons/minus-plus/property-24.scss | 2 +- .../icons/icons/minus-square-fill/index.scss | 2 +- .../icons/minus-square-fill/keyframes.scss | 2 +- .../icons/minus-square-fill/placeholders.scss | 2 +- 
.../base/icons/icons/minus-square/index.scss | 2 +- .../icons/icons/minus-square/keyframes.scss | 2 +- .../icons/minus-square/placeholders.scss | 2 +- .../icons/icons/minus-square/property-16.scss | 2 +- .../icons/icons/minus-square/property-24.scss | 2 +- .../styles/base/icons/icons/minus/index.scss | 2 +- .../base/icons/icons/minus/keyframes.scss | 2 +- .../base/icons/icons/minus/placeholders.scss | 2 +- .../base/icons/icons/minus/property-16.scss | 2 +- .../base/icons/icons/minus/property-24.scss | 2 +- .../styles/base/icons/icons/module/index.scss | 2 +- .../base/icons/icons/module/keyframes.scss | 2 +- .../base/icons/icons/module/placeholders.scss | 2 +- .../base/icons/icons/module/property-16.scss | 2 +- .../base/icons/icons/module/property-24.scss | 2 +- .../base/icons/icons/monitor/index.scss | 2 +- .../base/icons/icons/monitor/keyframes.scss | 2 +- .../icons/icons/monitor/placeholders.scss | 2 +- .../base/icons/icons/monitor/property-16.scss | 2 +- .../base/icons/icons/monitor/property-24.scss | 2 +- .../styles/base/icons/icons/moon/index.scss | 2 +- .../base/icons/icons/moon/keyframes.scss | 2 +- .../base/icons/icons/moon/placeholders.scss | 2 +- .../base/icons/icons/moon/property-16.scss | 2 +- .../base/icons/icons/moon/property-24.scss | 2 +- .../icons/icons/more-horizontal/index.scss | 2 +- .../icons/more-horizontal/keyframes.scss | 2 +- .../icons/more-horizontal/placeholders.scss | 2 +- .../icons/more-horizontal/property-16.scss | 2 +- .../icons/more-horizontal/property-24.scss | 2 +- .../base/icons/icons/more-vertical/index.scss | 2 +- .../icons/icons/more-vertical/keyframes.scss | 2 +- .../icons/more-vertical/placeholders.scss | 2 +- .../icons/more-vertical/property-16.scss | 2 +- .../icons/more-vertical/property-24.scss | 2 +- .../base/icons/icons/mouse-pointer/index.scss | 2 +- .../icons/icons/mouse-pointer/keyframes.scss | 2 +- .../icons/mouse-pointer/placeholders.scss | 2 +- .../icons/mouse-pointer/property-16.scss | 2 +- .../icons/mouse-pointer/property-24.scss | 2 +- .../styles/base/icons/icons/move/index.scss | 2 +- .../base/icons/icons/move/keyframes.scss | 2 +- .../base/icons/icons/move/placeholders.scss | 2 +- .../base/icons/icons/move/property-16.scss | 2 +- .../base/icons/icons/move/property-24.scss | 2 +- .../styles/base/icons/icons/music/index.scss | 2 +- .../base/icons/icons/music/keyframes.scss | 2 +- .../base/icons/icons/music/placeholders.scss | 2 +- .../base/icons/icons/music/property-16.scss | 2 +- .../base/icons/icons/music/property-24.scss | 2 +- .../icons/icons/navigation-alt/index.scss | 2 +- .../icons/icons/navigation-alt/keyframes.scss | 2 +- .../icons/navigation-alt/placeholders.scss | 2 +- .../icons/navigation-alt/property-16.scss | 2 +- .../icons/navigation-alt/property-24.scss | 2 +- .../base/icons/icons/navigation/index.scss | 2 +- .../icons/icons/navigation/keyframes.scss | 2 +- .../icons/icons/navigation/placeholders.scss | 2 +- .../icons/icons/navigation/property-16.scss | 2 +- .../icons/icons/navigation/property-24.scss | 2 +- .../base/icons/icons/network-alt/index.scss | 2 +- .../icons/icons/network-alt/keyframes.scss | 2 +- .../icons/icons/network-alt/placeholders.scss | 2 +- .../icons/icons/network-alt/property-16.scss | 2 +- .../icons/icons/network-alt/property-24.scss | 2 +- .../base/icons/icons/network/index.scss | 2 +- .../base/icons/icons/network/keyframes.scss | 2 +- .../icons/icons/network/placeholders.scss | 2 +- .../base/icons/icons/network/property-16.scss | 2 +- .../base/icons/icons/network/property-24.scss | 2 +- 
.../base/icons/icons/newspaper/index.scss | 2 +- .../base/icons/icons/newspaper/keyframes.scss | 2 +- .../icons/icons/newspaper/placeholders.scss | 2 +- .../icons/icons/newspaper/property-16.scss | 2 +- .../icons/icons/newspaper/property-24.scss | 2 +- .../styles/base/icons/icons/node/index.scss | 2 +- .../base/icons/icons/node/keyframes.scss | 2 +- .../base/icons/icons/node/placeholders.scss | 2 +- .../base/icons/icons/node/property-16.scss | 2 +- .../base/icons/icons/node/property-24.scss | 2 +- .../base/icons/icons/nomad-color/index.scss | 2 +- .../icons/icons/nomad-color/keyframes.scss | 2 +- .../icons/icons/nomad-color/placeholders.scss | 2 +- .../icons/icons/nomad-color/property-16.scss | 2 +- .../icons/icons/nomad-color/property-24.scss | 2 +- .../styles/base/icons/icons/nomad/index.scss | 2 +- .../base/icons/icons/nomad/keyframes.scss | 2 +- .../base/icons/icons/nomad/placeholders.scss | 2 +- .../base/icons/icons/nomad/property-16.scss | 2 +- .../base/icons/icons/nomad/property-24.scss | 2 +- .../icons/notification-disabled/index.scss | 2 +- .../notification-disabled/keyframes.scss | 2 +- .../notification-disabled/placeholders.scss | 2 +- .../icons/icons/notification-fill/index.scss | 2 +- .../icons/notification-fill/keyframes.scss | 2 +- .../icons/notification-fill/placeholders.scss | 2 +- .../icons/notification-outline/index.scss | 2 +- .../icons/notification-outline/keyframes.scss | 2 +- .../notification-outline/placeholders.scss | 2 +- .../base/icons/icons/octagon/index.scss | 2 +- .../base/icons/icons/octagon/keyframes.scss | 2 +- .../icons/icons/octagon/placeholders.scss | 2 +- .../base/icons/icons/octagon/property-16.scss | 2 +- .../base/icons/icons/octagon/property-24.scss | 2 +- .../base/icons/icons/okta-color/index.scss | 2 +- .../icons/icons/okta-color/keyframes.scss | 2 +- .../icons/icons/okta-color/placeholders.scss | 2 +- .../icons/icons/okta-color/property-16.scss | 2 +- .../icons/icons/okta-color/property-24.scss | 2 +- .../styles/base/icons/icons/okta/index.scss | 2 +- .../base/icons/icons/okta/keyframes.scss | 2 +- .../base/icons/icons/okta/placeholders.scss | 2 +- .../base/icons/icons/okta/property-16.scss | 2 +- .../base/icons/icons/okta/property-24.scss | 2 +- .../base/icons/icons/oracle-color/index.scss | 2 +- .../icons/icons/oracle-color/keyframes.scss | 2 +- .../icons/oracle-color/placeholders.scss | 2 +- .../icons/icons/oracle-color/property-16.scss | 2 +- .../icons/icons/oracle-color/property-24.scss | 2 +- .../styles/base/icons/icons/oracle/index.scss | 2 +- .../base/icons/icons/oracle/keyframes.scss | 2 +- .../base/icons/icons/oracle/placeholders.scss | 2 +- .../base/icons/icons/oracle/property-16.scss | 2 +- .../base/icons/icons/oracle/property-24.scss | 2 +- .../styles/base/icons/icons/org/index.scss | 2 +- .../base/icons/icons/org/keyframes.scss | 2 +- .../base/icons/icons/org/placeholders.scss | 2 +- .../base/icons/icons/org/property-16.scss | 2 +- .../base/icons/icons/org/property-24.scss | 2 +- .../base/icons/icons/outline/index.scss | 2 +- .../base/icons/icons/outline/keyframes.scss | 2 +- .../icons/icons/outline/placeholders.scss | 2 +- .../base/icons/icons/outline/property-16.scss | 2 +- .../base/icons/icons/outline/property-24.scss | 2 +- .../base/icons/icons/pack-color/index.scss | 2 +- .../icons/icons/pack-color/keyframes.scss | 2 +- .../icons/icons/pack-color/placeholders.scss | 2 +- .../icons/icons/pack-color/property-16.scss | 2 +- .../icons/icons/pack-color/property-24.scss | 2 +- .../styles/base/icons/icons/pack/index.scss | 2 +- 
.../base/icons/icons/pack/keyframes.scss | 2 +- .../base/icons/icons/pack/placeholders.scss | 2 +- .../base/icons/icons/pack/property-16.scss | 2 +- .../base/icons/icons/pack/property-24.scss | 2 +- .../base/icons/icons/package/index.scss | 2 +- .../base/icons/icons/package/keyframes.scss | 2 +- .../icons/icons/package/placeholders.scss | 2 +- .../base/icons/icons/package/property-16.scss | 2 +- .../base/icons/icons/package/property-24.scss | 2 +- .../base/icons/icons/packer-color/index.scss | 2 +- .../icons/icons/packer-color/keyframes.scss | 2 +- .../icons/packer-color/placeholders.scss | 2 +- .../icons/icons/packer-color/property-16.scss | 2 +- .../icons/icons/packer-color/property-24.scss | 2 +- .../styles/base/icons/icons/packer/index.scss | 2 +- .../base/icons/icons/packer/keyframes.scss | 2 +- .../base/icons/icons/packer/placeholders.scss | 2 +- .../base/icons/icons/packer/property-16.scss | 2 +- .../base/icons/icons/packer/property-24.scss | 2 +- .../base/icons/icons/page-outline/index.scss | 2 +- .../icons/icons/page-outline/keyframes.scss | 2 +- .../icons/page-outline/placeholders.scss | 2 +- .../base/icons/icons/paperclip/index.scss | 2 +- .../base/icons/icons/paperclip/keyframes.scss | 2 +- .../icons/icons/paperclip/placeholders.scss | 2 +- .../icons/icons/paperclip/property-16.scss | 2 +- .../icons/icons/paperclip/property-24.scss | 2 +- .../base/icons/icons/partner/index.scss | 2 +- .../base/icons/icons/partner/keyframes.scss | 2 +- .../icons/icons/partner/placeholders.scss | 2 +- .../styles/base/icons/icons/path/index.scss | 2 +- .../base/icons/icons/path/keyframes.scss | 2 +- .../base/icons/icons/path/placeholders.scss | 2 +- .../base/icons/icons/path/property-16.scss | 2 +- .../base/icons/icons/path/property-24.scss | 2 +- .../base/icons/icons/pause-circle/index.scss | 2 +- .../icons/icons/pause-circle/keyframes.scss | 2 +- .../icons/pause-circle/placeholders.scss | 2 +- .../icons/icons/pause-circle/property-16.scss | 2 +- .../icons/icons/pause-circle/property-24.scss | 2 +- .../styles/base/icons/icons/pause/index.scss | 2 +- .../base/icons/icons/pause/keyframes.scss | 2 +- .../base/icons/icons/pause/placeholders.scss | 2 +- .../base/icons/icons/pause/property-16.scss | 2 +- .../base/icons/icons/pause/property-24.scss | 2 +- .../base/icons/icons/pen-tool/index.scss | 2 +- .../base/icons/icons/pen-tool/keyframes.scss | 2 +- .../icons/icons/pen-tool/placeholders.scss | 2 +- .../icons/icons/pen-tool/property-16.scss | 2 +- .../icons/icons/pen-tool/property-24.scss | 2 +- .../base/icons/icons/pencil-tool/index.scss | 2 +- .../icons/icons/pencil-tool/keyframes.scss | 2 +- .../icons/icons/pencil-tool/placeholders.scss | 2 +- .../icons/icons/pencil-tool/property-16.scss | 2 +- .../icons/icons/pencil-tool/property-24.scss | 2 +- .../base/icons/icons/phone-call/index.scss | 2 +- .../icons/icons/phone-call/keyframes.scss | 2 +- .../icons/icons/phone-call/placeholders.scss | 2 +- .../icons/icons/phone-call/property-16.scss | 2 +- .../icons/icons/phone-call/property-24.scss | 2 +- .../base/icons/icons/phone-off/index.scss | 2 +- .../base/icons/icons/phone-off/keyframes.scss | 2 +- .../icons/icons/phone-off/placeholders.scss | 2 +- .../icons/icons/phone-off/property-16.scss | 2 +- .../icons/icons/phone-off/property-24.scss | 2 +- .../styles/base/icons/icons/phone/index.scss | 2 +- .../base/icons/icons/phone/keyframes.scss | 2 +- .../base/icons/icons/phone/placeholders.scss | 2 +- .../base/icons/icons/phone/property-16.scss | 2 +- .../base/icons/icons/phone/property-24.scss | 2 +- 
.../base/icons/icons/pie-chart/index.scss | 2 +- .../base/icons/icons/pie-chart/keyframes.scss | 2 +- .../icons/icons/pie-chart/placeholders.scss | 2 +- .../icons/icons/pie-chart/property-16.scss | 2 +- .../icons/icons/pie-chart/property-24.scss | 2 +- .../styles/base/icons/icons/pin/index.scss | 2 +- .../base/icons/icons/pin/keyframes.scss | 2 +- .../base/icons/icons/pin/placeholders.scss | 2 +- .../base/icons/icons/pin/property-16.scss | 2 +- .../base/icons/icons/pin/property-24.scss | 2 +- .../base/icons/icons/play-circle/index.scss | 2 +- .../icons/icons/play-circle/keyframes.scss | 2 +- .../icons/icons/play-circle/placeholders.scss | 2 +- .../icons/icons/play-circle/property-16.scss | 2 +- .../icons/icons/play-circle/property-24.scss | 2 +- .../base/icons/icons/play-fill/index.scss | 2 +- .../base/icons/icons/play-fill/keyframes.scss | 2 +- .../icons/icons/play-fill/placeholders.scss | 2 +- .../base/icons/icons/play-outline/index.scss | 2 +- .../icons/icons/play-outline/keyframes.scss | 2 +- .../icons/play-outline/placeholders.scss | 2 +- .../base/icons/icons/play-plain/index.scss | 2 +- .../icons/icons/play-plain/keyframes.scss | 2 +- .../icons/icons/play-plain/placeholders.scss | 2 +- .../styles/base/icons/icons/play/index.scss | 2 +- .../base/icons/icons/play/keyframes.scss | 2 +- .../base/icons/icons/play/placeholders.scss | 2 +- .../base/icons/icons/play/property-16.scss | 2 +- .../base/icons/icons/play/property-24.scss | 2 +- .../icons/icons/plus-circle-fill/index.scss | 2 +- .../icons/plus-circle-fill/keyframes.scss | 2 +- .../icons/plus-circle-fill/placeholders.scss | 2 +- .../icons/plus-circle-outline/index.scss | 2 +- .../icons/plus-circle-outline/keyframes.scss | 2 +- .../plus-circle-outline/placeholders.scss | 2 +- .../base/icons/icons/plus-circle/index.scss | 2 +- .../icons/icons/plus-circle/keyframes.scss | 2 +- .../icons/icons/plus-circle/placeholders.scss | 2 +- .../icons/icons/plus-circle/property-16.scss | 2 +- .../icons/icons/plus-circle/property-24.scss | 2 +- .../base/icons/icons/plus-plain/index.scss | 2 +- .../icons/icons/plus-plain/keyframes.scss | 2 +- .../icons/icons/plus-plain/placeholders.scss | 2 +- .../icons/icons/plus-square-fill/index.scss | 2 +- .../icons/plus-square-fill/keyframes.scss | 2 +- .../icons/plus-square-fill/placeholders.scss | 2 +- .../base/icons/icons/plus-square/index.scss | 2 +- .../icons/icons/plus-square/keyframes.scss | 2 +- .../icons/icons/plus-square/placeholders.scss | 2 +- .../icons/icons/plus-square/property-16.scss | 2 +- .../icons/icons/plus-square/property-24.scss | 2 +- .../styles/base/icons/icons/plus/index.scss | 2 +- .../base/icons/icons/plus/keyframes.scss | 2 +- .../base/icons/icons/plus/placeholders.scss | 2 +- .../base/icons/icons/plus/property-16.scss | 2 +- .../base/icons/icons/plus/property-24.scss | 2 +- .../styles/base/icons/icons/port/index.scss | 2 +- .../base/icons/icons/port/keyframes.scss | 2 +- .../base/icons/icons/port/placeholders.scss | 2 +- .../base/icons/icons/port/property-16.scss | 2 +- .../base/icons/icons/port/property-24.scss | 2 +- .../styles/base/icons/icons/power/index.scss | 2 +- .../base/icons/icons/power/keyframes.scss | 2 +- .../base/icons/icons/power/placeholders.scss | 2 +- .../base/icons/icons/power/property-16.scss | 2 +- .../base/icons/icons/power/property-24.scss | 2 +- .../base/icons/icons/printer/index.scss | 2 +- .../base/icons/icons/printer/keyframes.scss | 2 +- .../icons/icons/printer/placeholders.scss | 2 +- .../base/icons/icons/printer/property-16.scss | 2 +- 
.../base/icons/icons/printer/property-24.scss | 2 +- .../base/icons/icons/protocol/index.scss | 2 +- .../base/icons/icons/protocol/keyframes.scss | 2 +- .../icons/icons/protocol/placeholders.scss | 2 +- .../icons/icons/protocol/property-16.scss | 2 +- .../icons/icons/protocol/property-24.scss | 2 +- .../base/icons/icons/provider/index.scss | 2 +- .../base/icons/icons/provider/keyframes.scss | 2 +- .../icons/icons/provider/placeholders.scss | 2 +- .../icons/icons/provider/property-16.scss | 2 +- .../icons/icons/provider/property-24.scss | 2 +- .../icons/icons/public-default/index.scss | 2 +- .../icons/icons/public-default/keyframes.scss | 2 +- .../icons/public-default/placeholders.scss | 2 +- .../base/icons/icons/public-locked/index.scss | 2 +- .../icons/icons/public-locked/keyframes.scss | 2 +- .../icons/public-locked/placeholders.scss | 2 +- .../styles/base/icons/icons/queue/index.scss | 2 +- .../base/icons/icons/queue/keyframes.scss | 2 +- .../base/icons/icons/queue/placeholders.scss | 2 +- .../base/icons/icons/queue/property-16.scss | 2 +- .../base/icons/icons/queue/property-24.scss | 2 +- .../icons/radio-button-checked/index.scss | 2 +- .../icons/radio-button-checked/keyframes.scss | 2 +- .../radio-button-checked/placeholders.scss | 2 +- .../icons/radio-button-unchecked/index.scss | 2 +- .../radio-button-unchecked/keyframes.scss | 2 +- .../radio-button-unchecked/placeholders.scss | 2 +- .../styles/base/icons/icons/radio/index.scss | 2 +- .../base/icons/icons/radio/keyframes.scss | 2 +- .../base/icons/icons/radio/placeholders.scss | 2 +- .../base/icons/icons/radio/property-16.scss | 2 +- .../base/icons/icons/radio/property-24.scss | 2 +- .../styles/base/icons/icons/random/index.scss | 2 +- .../base/icons/icons/random/keyframes.scss | 2 +- .../base/icons/icons/random/placeholders.scss | 2 +- .../base/icons/icons/random/property-16.scss | 2 +- .../base/icons/icons/random/property-24.scss | 2 +- .../base/icons/icons/redirect/index.scss | 2 +- .../base/icons/icons/redirect/keyframes.scss | 2 +- .../icons/icons/redirect/placeholders.scss | 2 +- .../icons/icons/redirect/property-16.scss | 2 +- .../icons/icons/redirect/property-24.scss | 2 +- .../base/icons/icons/refresh-alert/index.scss | 2 +- .../icons/icons/refresh-alert/keyframes.scss | 2 +- .../icons/refresh-alert/placeholders.scss | 2 +- .../icons/icons/refresh-default/index.scss | 2 +- .../icons/refresh-default/keyframes.scss | 2 +- .../icons/refresh-default/placeholders.scss | 2 +- .../styles/base/icons/icons/reload/index.scss | 2 +- .../base/icons/icons/reload/keyframes.scss | 2 +- .../base/icons/icons/reload/placeholders.scss | 2 +- .../base/icons/icons/reload/property-16.scss | 2 +- .../base/icons/icons/reload/property-24.scss | 2 +- .../styles/base/icons/icons/remix/index.scss | 2 +- .../base/icons/icons/remix/keyframes.scss | 2 +- .../base/icons/icons/remix/placeholders.scss | 2 +- .../styles/base/icons/icons/repeat/index.scss | 2 +- .../base/icons/icons/repeat/keyframes.scss | 2 +- .../base/icons/icons/repeat/placeholders.scss | 2 +- .../base/icons/icons/repeat/property-16.scss | 2 +- .../base/icons/icons/repeat/property-24.scss | 2 +- .../icons/icons/replication-direct/index.scss | 2 +- .../icons/replication-direct/keyframes.scss | 2 +- .../replication-direct/placeholders.scss | 2 +- .../icons/replication-direct/property-16.scss | 2 +- .../icons/replication-direct/property-24.scss | 2 +- .../icons/icons/replication-perf/index.scss | 2 +- .../icons/replication-perf/keyframes.scss | 2 +- 
.../icons/replication-perf/placeholders.scss | 2 +- .../icons/replication-perf/property-16.scss | 2 +- .../icons/replication-perf/property-24.scss | 2 +- .../styles/base/icons/icons/rewind/index.scss | 2 +- .../base/icons/icons/rewind/keyframes.scss | 2 +- .../base/icons/icons/rewind/placeholders.scss | 2 +- .../base/icons/icons/rewind/property-16.scss | 2 +- .../base/icons/icons/rewind/property-24.scss | 2 +- .../styles/base/icons/icons/ribbon/index.scss | 2 +- .../base/icons/icons/ribbon/keyframes.scss | 2 +- .../base/icons/icons/ribbon/placeholders.scss | 2 +- .../styles/base/icons/icons/rocket/index.scss | 2 +- .../base/icons/icons/rocket/keyframes.scss | 2 +- .../base/icons/icons/rocket/placeholders.scss | 2 +- .../base/icons/icons/rocket/property-16.scss | 2 +- .../base/icons/icons/rocket/property-24.scss | 2 +- .../base/icons/icons/rotate-ccw/index.scss | 2 +- .../icons/icons/rotate-ccw/keyframes.scss | 2 +- .../icons/icons/rotate-ccw/placeholders.scss | 2 +- .../icons/icons/rotate-ccw/property-16.scss | 2 +- .../icons/icons/rotate-ccw/property-24.scss | 2 +- .../base/icons/icons/rotate-cw/index.scss | 2 +- .../base/icons/icons/rotate-cw/keyframes.scss | 2 +- .../icons/icons/rotate-cw/placeholders.scss | 2 +- .../icons/icons/rotate-cw/property-16.scss | 2 +- .../icons/icons/rotate-cw/property-24.scss | 2 +- .../styles/base/icons/icons/rss/index.scss | 2 +- .../base/icons/icons/rss/keyframes.scss | 2 +- .../base/icons/icons/rss/placeholders.scss | 2 +- .../base/icons/icons/rss/property-16.scss | 2 +- .../base/icons/icons/rss/property-24.scss | 2 +- .../styles/base/icons/icons/run/index.scss | 2 +- .../base/icons/icons/run/keyframes.scss | 2 +- .../base/icons/icons/run/placeholders.scss | 2 +- .../base/icons/icons/run/property-16.scss | 2 +- .../base/icons/icons/run/property-24.scss | 2 +- .../base/icons/icons/running/index.scss | 2 +- .../base/icons/icons/running/keyframes.scss | 2 +- .../icons/icons/running/placeholders.scss | 2 +- .../base/icons/icons/running/property-16.scss | 2 +- .../base/icons/icons/running/property-24.scss | 2 +- .../styles/base/icons/icons/save/index.scss | 2 +- .../base/icons/icons/save/keyframes.scss | 2 +- .../base/icons/icons/save/placeholders.scss | 2 +- .../base/icons/icons/save/property-16.scss | 2 +- .../base/icons/icons/save/property-24.scss | 2 +- .../base/icons/icons/scissors/index.scss | 2 +- .../base/icons/icons/scissors/keyframes.scss | 2 +- .../icons/icons/scissors/placeholders.scss | 2 +- .../icons/icons/scissors/property-16.scss | 2 +- .../icons/icons/scissors/property-24.scss | 2 +- .../base/icons/icons/search-color/index.scss | 2 +- .../icons/icons/search-color/keyframes.scss | 2 +- .../icons/search-color/placeholders.scss | 2 +- .../icons/icons/search-color/property-16.scss | 2 +- .../icons/icons/search-color/property-24.scss | 2 +- .../styles/base/icons/icons/search/index.scss | 2 +- .../base/icons/icons/search/keyframes.scss | 2 +- .../base/icons/icons/search/placeholders.scss | 2 +- .../base/icons/icons/search/property-16.scss | 2 +- .../base/icons/icons/search/property-24.scss | 2 +- .../styles/base/icons/icons/send/index.scss | 2 +- .../base/icons/icons/send/keyframes.scss | 2 +- .../base/icons/icons/send/placeholders.scss | 2 +- .../base/icons/icons/send/property-16.scss | 2 +- .../base/icons/icons/send/property-24.scss | 2 +- .../icons/icons/server-cluster/index.scss | 2 +- .../icons/icons/server-cluster/keyframes.scss | 2 +- .../icons/server-cluster/placeholders.scss | 2 +- .../icons/server-cluster/property-16.scss | 2 +- 
.../icons/server-cluster/property-24.scss | 2 +- .../styles/base/icons/icons/server/index.scss | 2 +- .../base/icons/icons/server/keyframes.scss | 2 +- .../base/icons/icons/server/placeholders.scss | 2 +- .../base/icons/icons/server/property-16.scss | 2 +- .../base/icons/icons/server/property-24.scss | 2 +- .../base/icons/icons/serverless/index.scss | 2 +- .../icons/icons/serverless/keyframes.scss | 2 +- .../icons/icons/serverless/placeholders.scss | 2 +- .../icons/icons/serverless/property-16.scss | 2 +- .../icons/icons/serverless/property-24.scss | 2 +- .../base/icons/icons/settings/index.scss | 2 +- .../base/icons/icons/settings/keyframes.scss | 2 +- .../icons/icons/settings/placeholders.scss | 2 +- .../icons/icons/settings/property-16.scss | 2 +- .../icons/icons/settings/property-24.scss | 2 +- .../styles/base/icons/icons/share/index.scss | 2 +- .../base/icons/icons/share/keyframes.scss | 2 +- .../base/icons/icons/share/placeholders.scss | 2 +- .../base/icons/icons/share/property-16.scss | 2 +- .../base/icons/icons/share/property-24.scss | 2 +- .../base/icons/icons/shield-alert/index.scss | 2 +- .../icons/icons/shield-alert/keyframes.scss | 2 +- .../icons/shield-alert/placeholders.scss | 2 +- .../icons/icons/shield-alert/property-16.scss | 2 +- .../icons/icons/shield-alert/property-24.scss | 2 +- .../base/icons/icons/shield-check/index.scss | 2 +- .../icons/icons/shield-check/keyframes.scss | 2 +- .../icons/shield-check/placeholders.scss | 2 +- .../icons/icons/shield-check/property-16.scss | 2 +- .../icons/icons/shield-check/property-24.scss | 2 +- .../base/icons/icons/shield-off/index.scss | 2 +- .../icons/icons/shield-off/keyframes.scss | 2 +- .../icons/icons/shield-off/placeholders.scss | 2 +- .../icons/icons/shield-off/property-16.scss | 2 +- .../icons/icons/shield-off/property-24.scss | 2 +- .../base/icons/icons/shield-x/index.scss | 2 +- .../base/icons/icons/shield-x/keyframes.scss | 2 +- .../icons/icons/shield-x/placeholders.scss | 2 +- .../icons/icons/shield-x/property-16.scss | 2 +- .../icons/icons/shield-x/property-24.scss | 2 +- .../styles/base/icons/icons/shield/index.scss | 2 +- .../base/icons/icons/shield/keyframes.scss | 2 +- .../base/icons/icons/shield/placeholders.scss | 2 +- .../base/icons/icons/shield/property-16.scss | 2 +- .../base/icons/icons/shield/property-24.scss | 2 +- .../base/icons/icons/shopping-bag/index.scss | 2 +- .../icons/icons/shopping-bag/keyframes.scss | 2 +- .../icons/shopping-bag/placeholders.scss | 2 +- .../icons/icons/shopping-bag/property-16.scss | 2 +- .../icons/icons/shopping-bag/property-24.scss | 2 +- .../base/icons/icons/shopping-cart/index.scss | 2 +- .../icons/icons/shopping-cart/keyframes.scss | 2 +- .../icons/shopping-cart/placeholders.scss | 2 +- .../icons/shopping-cart/property-16.scss | 2 +- .../icons/shopping-cart/property-24.scss | 2 +- .../base/icons/icons/shuffle/index.scss | 2 +- .../base/icons/icons/shuffle/keyframes.scss | 2 +- .../icons/icons/shuffle/placeholders.scss | 2 +- .../base/icons/icons/shuffle/property-16.scss | 2 +- .../base/icons/icons/shuffle/property-24.scss | 2 +- .../base/icons/icons/sidebar-hide/index.scss | 2 +- .../icons/icons/sidebar-hide/keyframes.scss | 2 +- .../icons/sidebar-hide/placeholders.scss | 2 +- .../icons/icons/sidebar-hide/property-16.scss | 2 +- .../icons/icons/sidebar-hide/property-24.scss | 2 +- .../base/icons/icons/sidebar-show/index.scss | 2 +- .../icons/icons/sidebar-show/keyframes.scss | 2 +- .../icons/sidebar-show/placeholders.scss | 2 +- .../icons/icons/sidebar-show/property-16.scss 
| 2 +- .../icons/icons/sidebar-show/property-24.scss | 2 +- .../base/icons/icons/sidebar/index.scss | 2 +- .../base/icons/icons/sidebar/keyframes.scss | 2 +- .../icons/icons/sidebar/placeholders.scss | 2 +- .../base/icons/icons/sidebar/property-16.scss | 2 +- .../base/icons/icons/sidebar/property-24.scss | 2 +- .../base/icons/icons/sign-in/index.scss | 2 +- .../base/icons/icons/sign-in/keyframes.scss | 2 +- .../icons/icons/sign-in/placeholders.scss | 2 +- .../base/icons/icons/sign-in/property-16.scss | 2 +- .../base/icons/icons/sign-in/property-24.scss | 2 +- .../base/icons/icons/sign-out/index.scss | 2 +- .../base/icons/icons/sign-out/keyframes.scss | 2 +- .../icons/icons/sign-out/placeholders.scss | 2 +- .../icons/icons/sign-out/property-16.scss | 2 +- .../icons/icons/sign-out/property-24.scss | 2 +- .../base/icons/icons/skip-back/index.scss | 2 +- .../base/icons/icons/skip-back/keyframes.scss | 2 +- .../icons/icons/skip-back/placeholders.scss | 2 +- .../icons/icons/skip-back/property-16.scss | 2 +- .../icons/icons/skip-back/property-24.scss | 2 +- .../base/icons/icons/skip-forward/index.scss | 2 +- .../icons/icons/skip-forward/keyframes.scss | 2 +- .../icons/skip-forward/placeholders.scss | 2 +- .../icons/icons/skip-forward/property-16.scss | 2 +- .../icons/icons/skip-forward/property-24.scss | 2 +- .../styles/base/icons/icons/skip/index.scss | 2 +- .../base/icons/icons/skip/keyframes.scss | 2 +- .../base/icons/icons/skip/placeholders.scss | 2 +- .../base/icons/icons/skip/property-16.scss | 2 +- .../base/icons/icons/skip/property-24.scss | 2 +- .../base/icons/icons/slack-color/index.scss | 2 +- .../icons/icons/slack-color/keyframes.scss | 2 +- .../icons/icons/slack-color/placeholders.scss | 2 +- .../icons/icons/slack-color/property-16.scss | 2 +- .../icons/icons/slack-color/property-24.scss | 2 +- .../styles/base/icons/icons/slack/index.scss | 2 +- .../base/icons/icons/slack/keyframes.scss | 2 +- .../base/icons/icons/slack/placeholders.scss | 2 +- .../base/icons/icons/slack/property-16.scss | 2 +- .../base/icons/icons/slack/property-24.scss | 2 +- .../base/icons/icons/slash-square/index.scss | 2 +- .../icons/icons/slash-square/keyframes.scss | 2 +- .../icons/slash-square/placeholders.scss | 2 +- .../icons/icons/slash-square/property-16.scss | 2 +- .../icons/icons/slash-square/property-24.scss | 2 +- .../styles/base/icons/icons/slash/index.scss | 2 +- .../base/icons/icons/slash/keyframes.scss | 2 +- .../base/icons/icons/slash/placeholders.scss | 2 +- .../base/icons/icons/slash/property-16.scss | 2 +- .../base/icons/icons/slash/property-24.scss | 2 +- .../base/icons/icons/sliders/index.scss | 2 +- .../base/icons/icons/sliders/keyframes.scss | 2 +- .../icons/icons/sliders/placeholders.scss | 2 +- .../base/icons/icons/sliders/property-16.scss | 2 +- .../base/icons/icons/sliders/property-24.scss | 2 +- .../base/icons/icons/smartphone/index.scss | 2 +- .../icons/icons/smartphone/keyframes.scss | 2 +- .../icons/icons/smartphone/placeholders.scss | 2 +- .../icons/icons/smartphone/property-16.scss | 2 +- .../icons/icons/smartphone/property-24.scss | 2 +- .../styles/base/icons/icons/smile/index.scss | 2 +- .../base/icons/icons/smile/keyframes.scss | 2 +- .../base/icons/icons/smile/placeholders.scss | 2 +- .../base/icons/icons/smile/property-16.scss | 2 +- .../base/icons/icons/smile/property-24.scss | 2 +- .../styles/base/icons/icons/socket/index.scss | 2 +- .../base/icons/icons/socket/keyframes.scss | 2 +- .../base/icons/icons/socket/placeholders.scss | 2 +- 
.../base/icons/icons/socket/property-16.scss | 2 +- .../base/icons/icons/socket/property-24.scss | 2 +- .../base/icons/icons/sort-asc/index.scss | 2 +- .../base/icons/icons/sort-asc/keyframes.scss | 2 +- .../icons/icons/sort-asc/placeholders.scss | 2 +- .../icons/icons/sort-asc/property-16.scss | 2 +- .../icons/icons/sort-asc/property-24.scss | 2 +- .../base/icons/icons/sort-desc/index.scss | 2 +- .../base/icons/icons/sort-desc/keyframes.scss | 2 +- .../icons/icons/sort-desc/placeholders.scss | 2 +- .../icons/icons/sort-desc/property-16.scss | 2 +- .../icons/icons/sort-desc/property-24.scss | 2 +- .../styles/base/icons/icons/sort/index.scss | 2 +- .../base/icons/icons/sort/keyframes.scss | 2 +- .../base/icons/icons/sort/placeholders.scss | 2 +- .../base/icons/icons/source-file/index.scss | 2 +- .../icons/icons/source-file/keyframes.scss | 2 +- .../icons/icons/source-file/placeholders.scss | 2 +- .../base/icons/icons/speaker/index.scss | 2 +- .../base/icons/icons/speaker/keyframes.scss | 2 +- .../icons/icons/speaker/placeholders.scss | 2 +- .../base/icons/icons/speaker/property-16.scss | 2 +- .../base/icons/icons/speaker/property-24.scss | 2 +- .../base/icons/icons/square-fill/index.scss | 2 +- .../icons/icons/square-fill/keyframes.scss | 2 +- .../icons/icons/square-fill/placeholders.scss | 2 +- .../icons/icons/square-fill/property-16.scss | 2 +- .../icons/icons/square-fill/property-24.scss | 2 +- .../styles/base/icons/icons/square/index.scss | 2 +- .../base/icons/icons/square/keyframes.scss | 2 +- .../base/icons/icons/square/placeholders.scss | 2 +- .../base/icons/icons/square/property-16.scss | 2 +- .../base/icons/icons/square/property-24.scss | 2 +- .../base/icons/icons/star-circle/index.scss | 2 +- .../icons/icons/star-circle/keyframes.scss | 2 +- .../icons/icons/star-circle/placeholders.scss | 2 +- .../icons/icons/star-circle/property-16.scss | 2 +- .../icons/icons/star-circle/property-24.scss | 2 +- .../base/icons/icons/star-fill/index.scss | 2 +- .../base/icons/icons/star-fill/keyframes.scss | 2 +- .../icons/icons/star-fill/placeholders.scss | 2 +- .../icons/icons/star-fill/property-16.scss | 2 +- .../icons/icons/star-fill/property-24.scss | 2 +- .../base/icons/icons/star-off/index.scss | 2 +- .../base/icons/icons/star-off/keyframes.scss | 2 +- .../icons/icons/star-off/placeholders.scss | 2 +- .../icons/icons/star-off/property-16.scss | 2 +- .../icons/icons/star-off/property-24.scss | 2 +- .../base/icons/icons/star-outline/index.scss | 2 +- .../icons/icons/star-outline/keyframes.scss | 2 +- .../icons/star-outline/placeholders.scss | 2 +- .../styles/base/icons/icons/star/index.scss | 2 +- .../base/icons/icons/star/keyframes.scss | 2 +- .../base/icons/icons/star/placeholders.scss | 2 +- .../base/icons/icons/star/property-16.scss | 2 +- .../base/icons/icons/star/property-24.scss | 2 +- .../base/icons/icons/stop-circle/index.scss | 2 +- .../icons/icons/stop-circle/keyframes.scss | 2 +- .../icons/icons/stop-circle/placeholders.scss | 2 +- .../icons/icons/stop-circle/property-16.scss | 2 +- .../icons/icons/stop-circle/property-24.scss | 2 +- .../base/icons/icons/sub-left/index.scss | 2 +- .../base/icons/icons/sub-left/keyframes.scss | 2 +- .../icons/icons/sub-left/placeholders.scss | 2 +- .../base/icons/icons/sub-right/index.scss | 2 +- .../base/icons/icons/sub-right/keyframes.scss | 2 +- .../icons/icons/sub-right/placeholders.scss | 2 +- .../styles/base/icons/icons/sun/index.scss | 2 +- .../base/icons/icons/sun/keyframes.scss | 2 +- .../base/icons/icons/sun/placeholders.scss | 2 +- 
.../base/icons/icons/sun/property-16.scss | 2 +- .../base/icons/icons/sun/property-24.scss | 2 +- .../base/icons/icons/support/index.scss | 2 +- .../base/icons/icons/support/keyframes.scss | 2 +- .../icons/icons/support/placeholders.scss | 2 +- .../base/icons/icons/support/property-16.scss | 2 +- .../base/icons/icons/support/property-24.scss | 2 +- .../icons/icons/swap-horizontal/index.scss | 2 +- .../icons/swap-horizontal/keyframes.scss | 2 +- .../icons/swap-horizontal/placeholders.scss | 2 +- .../icons/swap-horizontal/property-16.scss | 2 +- .../icons/swap-horizontal/property-24.scss | 2 +- .../base/icons/icons/swap-vertical/index.scss | 2 +- .../icons/icons/swap-vertical/keyframes.scss | 2 +- .../icons/swap-vertical/placeholders.scss | 2 +- .../icons/swap-vertical/property-16.scss | 2 +- .../icons/swap-vertical/property-24.scss | 2 +- .../base/icons/icons/switcher/index.scss | 2 +- .../base/icons/icons/switcher/keyframes.scss | 2 +- .../icons/icons/switcher/placeholders.scss | 2 +- .../icons/icons/switcher/property-16.scss | 2 +- .../icons/icons/switcher/property-24.scss | 2 +- .../base/icons/icons/sync-alert/index.scss | 2 +- .../icons/icons/sync-alert/keyframes.scss | 2 +- .../icons/icons/sync-alert/placeholders.scss | 2 +- .../icons/icons/sync-alert/property-16.scss | 2 +- .../icons/icons/sync-alert/property-24.scss | 2 +- .../base/icons/icons/sync-reverse/index.scss | 2 +- .../icons/icons/sync-reverse/keyframes.scss | 2 +- .../icons/sync-reverse/placeholders.scss | 2 +- .../icons/icons/sync-reverse/property-16.scss | 2 +- .../icons/icons/sync-reverse/property-24.scss | 2 +- .../styles/base/icons/icons/sync/index.scss | 2 +- .../base/icons/icons/sync/keyframes.scss | 2 +- .../base/icons/icons/sync/placeholders.scss | 2 +- .../base/icons/icons/sync/property-16.scss | 2 +- .../base/icons/icons/sync/property-24.scss | 2 +- .../styles/base/icons/icons/tablet/index.scss | 2 +- .../base/icons/icons/tablet/keyframes.scss | 2 +- .../base/icons/icons/tablet/placeholders.scss | 2 +- .../base/icons/icons/tablet/property-16.scss | 2 +- .../base/icons/icons/tablet/property-24.scss | 2 +- .../styles/base/icons/icons/tag/index.scss | 2 +- .../base/icons/icons/tag/keyframes.scss | 2 +- .../base/icons/icons/tag/placeholders.scss | 2 +- .../base/icons/icons/tag/property-16.scss | 2 +- .../base/icons/icons/tag/property-24.scss | 2 +- .../styles/base/icons/icons/target/index.scss | 2 +- .../base/icons/icons/target/keyframes.scss | 2 +- .../base/icons/icons/target/placeholders.scss | 2 +- .../base/icons/icons/target/property-16.scss | 2 +- .../base/icons/icons/target/property-24.scss | 2 +- .../icons/icons/terminal-screen/index.scss | 2 +- .../icons/terminal-screen/keyframes.scss | 2 +- .../icons/terminal-screen/placeholders.scss | 2 +- .../icons/terminal-screen/property-16.scss | 2 +- .../icons/terminal-screen/property-24.scss | 2 +- .../base/icons/icons/terminal/index.scss | 2 +- .../base/icons/icons/terminal/keyframes.scss | 2 +- .../icons/icons/terminal/placeholders.scss | 2 +- .../icons/icons/terminal/property-16.scss | 2 +- .../icons/icons/terminal/property-24.scss | 2 +- .../icons/icons/terraform-color/index.scss | 2 +- .../icons/terraform-color/keyframes.scss | 2 +- .../icons/terraform-color/placeholders.scss | 2 +- .../icons/terraform-color/property-16.scss | 2 +- .../icons/terraform-color/property-24.scss | 2 +- .../base/icons/icons/terraform/index.scss | 2 +- .../base/icons/icons/terraform/keyframes.scss | 2 +- .../icons/icons/terraform/placeholders.scss | 2 +- 
.../icons/icons/terraform/property-16.scss | 2 +- .../icons/icons/terraform/property-24.scss | 2 +- .../base/icons/icons/thumbs-down/index.scss | 2 +- .../icons/icons/thumbs-down/keyframes.scss | 2 +- .../icons/icons/thumbs-down/placeholders.scss | 2 +- .../icons/icons/thumbs-down/property-16.scss | 2 +- .../icons/icons/thumbs-down/property-24.scss | 2 +- .../base/icons/icons/thumbs-up/index.scss | 2 +- .../base/icons/icons/thumbs-up/keyframes.scss | 2 +- .../icons/icons/thumbs-up/placeholders.scss | 2 +- .../icons/icons/thumbs-up/property-16.scss | 2 +- .../icons/icons/thumbs-up/property-24.scss | 2 +- .../base/icons/icons/toggle-left/index.scss | 2 +- .../icons/icons/toggle-left/keyframes.scss | 2 +- .../icons/icons/toggle-left/placeholders.scss | 2 +- .../icons/icons/toggle-left/property-16.scss | 2 +- .../icons/icons/toggle-left/property-24.scss | 2 +- .../base/icons/icons/toggle-right/index.scss | 2 +- .../icons/icons/toggle-right/keyframes.scss | 2 +- .../icons/toggle-right/placeholders.scss | 2 +- .../icons/icons/toggle-right/property-16.scss | 2 +- .../icons/icons/toggle-right/property-24.scss | 2 +- .../styles/base/icons/icons/token/index.scss | 2 +- .../base/icons/icons/token/keyframes.scss | 2 +- .../base/icons/icons/token/placeholders.scss | 2 +- .../base/icons/icons/token/property-16.scss | 2 +- .../base/icons/icons/token/property-24.scss | 2 +- .../styles/base/icons/icons/tools/index.scss | 2 +- .../base/icons/icons/tools/keyframes.scss | 2 +- .../base/icons/icons/tools/placeholders.scss | 2 +- .../base/icons/icons/tools/property-16.scss | 2 +- .../base/icons/icons/tools/property-24.scss | 2 +- .../styles/base/icons/icons/top/index.scss | 2 +- .../base/icons/icons/top/keyframes.scss | 2 +- .../base/icons/icons/top/placeholders.scss | 2 +- .../base/icons/icons/top/property-16.scss | 2 +- .../base/icons/icons/top/property-24.scss | 2 +- .../styles/base/icons/icons/trash/index.scss | 2 +- .../base/icons/icons/trash/keyframes.scss | 2 +- .../base/icons/icons/trash/placeholders.scss | 2 +- .../base/icons/icons/trash/property-16.scss | 2 +- .../base/icons/icons/trash/property-24.scss | 2 +- .../base/icons/icons/trend-down/index.scss | 2 +- .../icons/icons/trend-down/keyframes.scss | 2 +- .../icons/icons/trend-down/placeholders.scss | 2 +- .../icons/icons/trend-down/property-16.scss | 2 +- .../icons/icons/trend-down/property-24.scss | 2 +- .../base/icons/icons/trend-up/index.scss | 2 +- .../base/icons/icons/trend-up/keyframes.scss | 2 +- .../icons/icons/trend-up/placeholders.scss | 2 +- .../icons/icons/trend-up/property-16.scss | 2 +- .../icons/icons/trend-up/property-24.scss | 2 +- .../base/icons/icons/triangle-fill/index.scss | 2 +- .../icons/icons/triangle-fill/keyframes.scss | 2 +- .../icons/triangle-fill/placeholders.scss | 2 +- .../icons/triangle-fill/property-16.scss | 2 +- .../icons/triangle-fill/property-24.scss | 2 +- .../base/icons/icons/triangle/index.scss | 2 +- .../base/icons/icons/triangle/keyframes.scss | 2 +- .../icons/icons/triangle/placeholders.scss | 2 +- .../icons/icons/triangle/property-16.scss | 2 +- .../icons/icons/triangle/property-24.scss | 2 +- .../styles/base/icons/icons/truck/index.scss | 2 +- .../base/icons/icons/truck/keyframes.scss | 2 +- .../base/icons/icons/truck/placeholders.scss | 2 +- .../base/icons/icons/truck/property-16.scss | 2 +- .../base/icons/icons/truck/property-24.scss | 2 +- .../styles/base/icons/icons/tune/index.scss | 2 +- .../base/icons/icons/tune/keyframes.scss | 2 +- .../base/icons/icons/tune/placeholders.scss | 2 +- 
.../app/styles/base/icons/icons/tv/index.scss | 2 +- .../styles/base/icons/icons/tv/keyframes.scss | 2 +- .../base/icons/icons/tv/placeholders.scss | 2 +- .../base/icons/icons/tv/property-16.scss | 2 +- .../base/icons/icons/tv/property-24.scss | 2 +- .../base/icons/icons/twitch-color/index.scss | 2 +- .../icons/icons/twitch-color/keyframes.scss | 2 +- .../icons/twitch-color/placeholders.scss | 2 +- .../icons/icons/twitch-color/property-16.scss | 2 +- .../icons/icons/twitch-color/property-24.scss | 2 +- .../styles/base/icons/icons/twitch/index.scss | 2 +- .../base/icons/icons/twitch/keyframes.scss | 2 +- .../base/icons/icons/twitch/placeholders.scss | 2 +- .../base/icons/icons/twitch/property-16.scss | 2 +- .../base/icons/icons/twitch/property-24.scss | 2 +- .../base/icons/icons/twitter-color/index.scss | 2 +- .../icons/icons/twitter-color/keyframes.scss | 2 +- .../icons/twitter-color/placeholders.scss | 2 +- .../icons/twitter-color/property-16.scss | 2 +- .../icons/twitter-color/property-24.scss | 2 +- .../base/icons/icons/twitter/index.scss | 2 +- .../base/icons/icons/twitter/keyframes.scss | 2 +- .../icons/icons/twitter/placeholders.scss | 2 +- .../base/icons/icons/twitter/property-16.scss | 2 +- .../base/icons/icons/twitter/property-24.scss | 2 +- .../styles/base/icons/icons/type/index.scss | 2 +- .../base/icons/icons/type/keyframes.scss | 2 +- .../base/icons/icons/type/placeholders.scss | 2 +- .../base/icons/icons/type/property-16.scss | 2 +- .../base/icons/icons/type/property-24.scss | 2 +- .../base/icons/icons/unfold-close/index.scss | 2 +- .../icons/icons/unfold-close/keyframes.scss | 2 +- .../icons/unfold-close/placeholders.scss | 2 +- .../icons/icons/unfold-close/property-16.scss | 2 +- .../icons/icons/unfold-close/property-24.scss | 2 +- .../base/icons/icons/unfold-less/index.scss | 2 +- .../icons/icons/unfold-less/keyframes.scss | 2 +- .../icons/icons/unfold-less/placeholders.scss | 2 +- .../base/icons/icons/unfold-more/index.scss | 2 +- .../icons/icons/unfold-more/keyframes.scss | 2 +- .../icons/icons/unfold-more/placeholders.scss | 2 +- .../base/icons/icons/unfold-open/index.scss | 2 +- .../icons/icons/unfold-open/keyframes.scss | 2 +- .../icons/icons/unfold-open/placeholders.scss | 2 +- .../icons/icons/unfold-open/property-16.scss | 2 +- .../icons/icons/unfold-open/property-24.scss | 2 +- .../styles/base/icons/icons/union/index.scss | 2 +- .../base/icons/icons/union/keyframes.scss | 2 +- .../base/icons/icons/union/placeholders.scss | 2 +- .../base/icons/icons/union/property-16.scss | 2 +- .../base/icons/icons/union/property-24.scss | 2 +- .../styles/base/icons/icons/unlock/index.scss | 2 +- .../base/icons/icons/unlock/keyframes.scss | 2 +- .../base/icons/icons/unlock/placeholders.scss | 2 +- .../base/icons/icons/unlock/property-16.scss | 2 +- .../base/icons/icons/unlock/property-24.scss | 2 +- .../styles/base/icons/icons/upload/index.scss | 2 +- .../base/icons/icons/upload/keyframes.scss | 2 +- .../base/icons/icons/upload/placeholders.scss | 2 +- .../base/icons/icons/upload/property-16.scss | 2 +- .../base/icons/icons/upload/property-24.scss | 2 +- .../base/icons/icons/user-add/index.scss | 2 +- .../base/icons/icons/user-add/keyframes.scss | 2 +- .../icons/icons/user-add/placeholders.scss | 2 +- .../base/icons/icons/user-check/index.scss | 2 +- .../icons/icons/user-check/keyframes.scss | 2 +- .../icons/icons/user-check/placeholders.scss | 2 +- .../icons/icons/user-check/property-16.scss | 2 +- .../icons/icons/user-check/property-24.scss | 2 +- 
.../icons/icons/user-circle-fill/index.scss | 2 +- .../icons/user-circle-fill/keyframes.scss | 2 +- .../icons/user-circle-fill/placeholders.scss | 2 +- .../icons/user-circle-fill/property-16.scss | 2 +- .../icons/user-circle-fill/property-24.scss | 2 +- .../base/icons/icons/user-circle/index.scss | 2 +- .../icons/icons/user-circle/keyframes.scss | 2 +- .../icons/icons/user-circle/placeholders.scss | 2 +- .../icons/icons/user-circle/property-16.scss | 2 +- .../icons/icons/user-circle/property-24.scss | 2 +- .../base/icons/icons/user-minus/index.scss | 2 +- .../icons/icons/user-minus/keyframes.scss | 2 +- .../icons/icons/user-minus/placeholders.scss | 2 +- .../icons/icons/user-minus/property-16.scss | 2 +- .../icons/icons/user-minus/property-24.scss | 2 +- .../icons/icons/user-organization/index.scss | 2 +- .../icons/user-organization/keyframes.scss | 2 +- .../icons/user-organization/placeholders.scss | 2 +- .../base/icons/icons/user-plain/index.scss | 2 +- .../icons/icons/user-plain/keyframes.scss | 2 +- .../icons/icons/user-plain/placeholders.scss | 2 +- .../base/icons/icons/user-plus/index.scss | 2 +- .../base/icons/icons/user-plus/keyframes.scss | 2 +- .../icons/icons/user-plus/placeholders.scss | 2 +- .../icons/icons/user-plus/property-16.scss | 2 +- .../icons/icons/user-plus/property-24.scss | 2 +- .../icons/icons/user-square-fill/index.scss | 2 +- .../icons/user-square-fill/keyframes.scss | 2 +- .../icons/user-square-fill/placeholders.scss | 2 +- .../icons/user-square-outline/index.scss | 2 +- .../icons/user-square-outline/keyframes.scss | 2 +- .../user-square-outline/placeholders.scss | 2 +- .../base/icons/icons/user-team/index.scss | 2 +- .../base/icons/icons/user-team/keyframes.scss | 2 +- .../icons/icons/user-team/placeholders.scss | 2 +- .../styles/base/icons/icons/user-x/index.scss | 2 +- .../base/icons/icons/user-x/keyframes.scss | 2 +- .../base/icons/icons/user-x/placeholders.scss | 2 +- .../base/icons/icons/user-x/property-16.scss | 2 +- .../base/icons/icons/user-x/property-24.scss | 2 +- .../styles/base/icons/icons/user/index.scss | 2 +- .../base/icons/icons/user/keyframes.scss | 2 +- .../base/icons/icons/user/placeholders.scss | 2 +- .../base/icons/icons/user/property-16.scss | 2 +- .../base/icons/icons/user/property-24.scss | 2 +- .../styles/base/icons/icons/users/index.scss | 2 +- .../base/icons/icons/users/keyframes.scss | 2 +- .../base/icons/icons/users/placeholders.scss | 2 +- .../base/icons/icons/users/property-16.scss | 2 +- .../base/icons/icons/users/property-24.scss | 2 +- .../base/icons/icons/vagrant-color/index.scss | 2 +- .../icons/icons/vagrant-color/keyframes.scss | 2 +- .../icons/vagrant-color/placeholders.scss | 2 +- .../icons/vagrant-color/property-16.scss | 2 +- .../icons/vagrant-color/property-24.scss | 2 +- .../base/icons/icons/vagrant/index.scss | 2 +- .../base/icons/icons/vagrant/keyframes.scss | 2 +- .../icons/icons/vagrant/placeholders.scss | 2 +- .../base/icons/icons/vagrant/property-16.scss | 2 +- .../base/icons/icons/vagrant/property-24.scss | 2 +- .../base/icons/icons/vault-color/index.scss | 2 +- .../icons/icons/vault-color/keyframes.scss | 2 +- .../icons/icons/vault-color/placeholders.scss | 2 +- .../icons/icons/vault-color/property-16.scss | 2 +- .../icons/icons/vault-color/property-24.scss | 2 +- .../styles/base/icons/icons/vault/index.scss | 2 +- .../base/icons/icons/vault/keyframes.scss | 2 +- .../base/icons/icons/vault/placeholders.scss | 2 +- .../base/icons/icons/vault/property-16.scss | 2 +- .../base/icons/icons/vault/property-24.scss | 
2 +- .../base/icons/icons/verified/index.scss | 2 +- .../base/icons/icons/verified/keyframes.scss | 2 +- .../icons/icons/verified/placeholders.scss | 2 +- .../icons/icons/verified/property-16.scss | 2 +- .../icons/icons/verified/property-24.scss | 2 +- .../base/icons/icons/video-off/index.scss | 2 +- .../base/icons/icons/video-off/keyframes.scss | 2 +- .../icons/icons/video-off/placeholders.scss | 2 +- .../icons/icons/video-off/property-16.scss | 2 +- .../icons/icons/video-off/property-24.scss | 2 +- .../styles/base/icons/icons/video/index.scss | 2 +- .../base/icons/icons/video/keyframes.scss | 2 +- .../base/icons/icons/video/placeholders.scss | 2 +- .../base/icons/icons/video/property-16.scss | 2 +- .../base/icons/icons/video/property-24.scss | 2 +- .../icons/icons/visibility-hide/index.scss | 2 +- .../icons/visibility-hide/keyframes.scss | 2 +- .../icons/visibility-hide/placeholders.scss | 2 +- .../icons/icons/visibility-show/index.scss | 2 +- .../icons/visibility-show/keyframes.scss | 2 +- .../icons/visibility-show/placeholders.scss | 2 +- .../base/icons/icons/vmware-color/index.scss | 2 +- .../icons/icons/vmware-color/keyframes.scss | 2 +- .../icons/vmware-color/placeholders.scss | 2 +- .../icons/icons/vmware-color/property-16.scss | 2 +- .../icons/icons/vmware-color/property-24.scss | 2 +- .../styles/base/icons/icons/vmware/index.scss | 2 +- .../base/icons/icons/vmware/keyframes.scss | 2 +- .../base/icons/icons/vmware/placeholders.scss | 2 +- .../base/icons/icons/vmware/property-16.scss | 2 +- .../base/icons/icons/vmware/property-24.scss | 2 +- .../base/icons/icons/volume-2/index.scss | 2 +- .../base/icons/icons/volume-2/keyframes.scss | 2 +- .../icons/icons/volume-2/placeholders.scss | 2 +- .../icons/icons/volume-2/property-16.scss | 2 +- .../icons/icons/volume-2/property-24.scss | 2 +- .../base/icons/icons/volume-down/index.scss | 2 +- .../icons/icons/volume-down/keyframes.scss | 2 +- .../icons/icons/volume-down/placeholders.scss | 2 +- .../icons/icons/volume-down/property-16.scss | 2 +- .../icons/icons/volume-down/property-24.scss | 2 +- .../base/icons/icons/volume-x/index.scss | 2 +- .../base/icons/icons/volume-x/keyframes.scss | 2 +- .../icons/icons/volume-x/placeholders.scss | 2 +- .../icons/icons/volume-x/property-16.scss | 2 +- .../icons/icons/volume-x/property-24.scss | 2 +- .../styles/base/icons/icons/volume/index.scss | 2 +- .../base/icons/icons/volume/keyframes.scss | 2 +- .../base/icons/icons/volume/placeholders.scss | 2 +- .../base/icons/icons/volume/property-16.scss | 2 +- .../base/icons/icons/volume/property-24.scss | 2 +- .../styles/base/icons/icons/wall/index.scss | 2 +- .../base/icons/icons/wall/keyframes.scss | 2 +- .../base/icons/icons/wall/placeholders.scss | 2 +- .../base/icons/icons/wall/property-16.scss | 2 +- .../base/icons/icons/wall/property-24.scss | 2 +- .../styles/base/icons/icons/wand/index.scss | 2 +- .../base/icons/icons/wand/keyframes.scss | 2 +- .../base/icons/icons/wand/placeholders.scss | 2 +- .../base/icons/icons/wand/property-16.scss | 2 +- .../base/icons/icons/wand/property-24.scss | 2 +- .../styles/base/icons/icons/watch/index.scss | 2 +- .../base/icons/icons/watch/keyframes.scss | 2 +- .../base/icons/icons/watch/placeholders.scss | 2 +- .../base/icons/icons/watch/property-16.scss | 2 +- .../base/icons/icons/watch/property-24.scss | 2 +- .../icons/icons/waypoint-color/index.scss | 2 +- .../icons/icons/waypoint-color/keyframes.scss | 2 +- .../icons/waypoint-color/placeholders.scss | 2 +- .../icons/waypoint-color/property-16.scss | 2 +- 
.../icons/waypoint-color/property-24.scss | 2 +- .../base/icons/icons/waypoint/index.scss | 2 +- .../base/icons/icons/waypoint/keyframes.scss | 2 +- .../icons/icons/waypoint/placeholders.scss | 2 +- .../icons/icons/waypoint/property-16.scss | 2 +- .../icons/icons/waypoint/property-24.scss | 2 +- .../base/icons/icons/webhook/index.scss | 2 +- .../base/icons/icons/webhook/keyframes.scss | 2 +- .../icons/icons/webhook/placeholders.scss | 2 +- .../base/icons/icons/webhook/property-16.scss | 2 +- .../base/icons/icons/webhook/property-24.scss | 2 +- .../base/icons/icons/wifi-off/index.scss | 2 +- .../base/icons/icons/wifi-off/keyframes.scss | 2 +- .../icons/icons/wifi-off/placeholders.scss | 2 +- .../icons/icons/wifi-off/property-16.scss | 2 +- .../icons/icons/wifi-off/property-24.scss | 2 +- .../styles/base/icons/icons/wifi/index.scss | 2 +- .../base/icons/icons/wifi/keyframes.scss | 2 +- .../base/icons/icons/wifi/placeholders.scss | 2 +- .../base/icons/icons/wifi/property-16.scss | 2 +- .../base/icons/icons/wifi/property-24.scss | 2 +- .../styles/base/icons/icons/wrench/index.scss | 2 +- .../base/icons/icons/wrench/keyframes.scss | 2 +- .../base/icons/icons/wrench/placeholders.scss | 2 +- .../base/icons/icons/wrench/property-16.scss | 2 +- .../base/icons/icons/wrench/property-24.scss | 2 +- .../base/icons/icons/x-circle-fill/index.scss | 2 +- .../icons/icons/x-circle-fill/keyframes.scss | 2 +- .../icons/x-circle-fill/placeholders.scss | 2 +- .../icons/x-circle-fill/property-16.scss | 2 +- .../icons/x-circle-fill/property-24.scss | 2 +- .../base/icons/icons/x-circle/index.scss | 2 +- .../base/icons/icons/x-circle/keyframes.scss | 2 +- .../icons/icons/x-circle/placeholders.scss | 2 +- .../icons/icons/x-circle/property-16.scss | 2 +- .../icons/icons/x-circle/property-24.scss | 2 +- .../icons/icons/x-diamond-fill/index.scss | 2 +- .../icons/icons/x-diamond-fill/keyframes.scss | 2 +- .../icons/x-diamond-fill/placeholders.scss | 2 +- .../icons/x-diamond-fill/property-16.scss | 2 +- .../icons/x-diamond-fill/property-24.scss | 2 +- .../base/icons/icons/x-diamond/index.scss | 2 +- .../base/icons/icons/x-diamond/keyframes.scss | 2 +- .../icons/icons/x-diamond/placeholders.scss | 2 +- .../icons/icons/x-diamond/property-16.scss | 2 +- .../icons/icons/x-diamond/property-24.scss | 2 +- .../icons/icons/x-hexagon-fill/index.scss | 2 +- .../icons/icons/x-hexagon-fill/keyframes.scss | 2 +- .../icons/x-hexagon-fill/placeholders.scss | 2 +- .../icons/x-hexagon-fill/property-16.scss | 2 +- .../icons/x-hexagon-fill/property-24.scss | 2 +- .../base/icons/icons/x-hexagon/index.scss | 2 +- .../base/icons/icons/x-hexagon/keyframes.scss | 2 +- .../icons/icons/x-hexagon/placeholders.scss | 2 +- .../icons/icons/x-hexagon/property-16.scss | 2 +- .../icons/icons/x-hexagon/property-24.scss | 2 +- .../base/icons/icons/x-square-fill/index.scss | 2 +- .../icons/icons/x-square-fill/keyframes.scss | 2 +- .../icons/x-square-fill/placeholders.scss | 2 +- .../icons/x-square-fill/property-16.scss | 2 +- .../icons/x-square-fill/property-24.scss | 2 +- .../base/icons/icons/x-square/index.scss | 2 +- .../base/icons/icons/x-square/keyframes.scss | 2 +- .../icons/icons/x-square/placeholders.scss | 2 +- .../icons/icons/x-square/property-16.scss | 2 +- .../icons/icons/x-square/property-24.scss | 2 +- .../app/styles/base/icons/icons/x/index.scss | 2 +- .../styles/base/icons/icons/x/keyframes.scss | 2 +- .../base/icons/icons/x/placeholders.scss | 2 +- .../base/icons/icons/x/property-16.scss | 2 +- .../base/icons/icons/x/property-24.scss | 2 
+- .../base/icons/icons/youtube-color/index.scss | 2 +- .../icons/icons/youtube-color/keyframes.scss | 2 +- .../icons/youtube-color/placeholders.scss | 2 +- .../icons/youtube-color/property-16.scss | 2 +- .../icons/youtube-color/property-24.scss | 2 +- .../base/icons/icons/youtube/index.scss | 2 +- .../base/icons/icons/youtube/keyframes.scss | 2 +- .../icons/icons/youtube/placeholders.scss | 2 +- .../base/icons/icons/youtube/property-16.scss | 2 +- .../base/icons/icons/youtube/property-24.scss | 2 +- .../base/icons/icons/zap-off/index.scss | 2 +- .../base/icons/icons/zap-off/keyframes.scss | 2 +- .../icons/icons/zap-off/placeholders.scss | 2 +- .../base/icons/icons/zap-off/property-16.scss | 2 +- .../base/icons/icons/zap-off/property-24.scss | 2 +- .../styles/base/icons/icons/zap/index.scss | 2 +- .../base/icons/icons/zap/keyframes.scss | 2 +- .../base/icons/icons/zap/placeholders.scss | 2 +- .../base/icons/icons/zap/property-16.scss | 2 +- .../base/icons/icons/zap/property-24.scss | 2 +- .../base/icons/icons/zoom-in/index.scss | 2 +- .../base/icons/icons/zoom-in/keyframes.scss | 2 +- .../icons/icons/zoom-in/placeholders.scss | 2 +- .../base/icons/icons/zoom-in/property-16.scss | 2 +- .../base/icons/icons/zoom-in/property-24.scss | 2 +- .../base/icons/icons/zoom-out/index.scss | 2 +- .../base/icons/icons/zoom-out/keyframes.scss | 2 +- .../icons/icons/zoom-out/placeholders.scss | 2 +- .../icons/icons/zoom-out/property-16.scss | 2 +- .../icons/icons/zoom-out/property-24.scss | 2 +- .../app/styles/base/icons/index.scss | 2 +- .../app/styles/base/icons/overrides.scss | 2 +- .../consul-ui/app/styles/base/index.scss | 2 +- .../app/styles/base/reset/base-variables.scss | 2 +- .../app/styles/base/reset/index.scss | 2 +- .../app/styles/base/reset/minireset.scss | 2 +- .../app/styles/base/reset/system.scss | 2 +- .../base/typography/base-keyframes.scss | 2 +- .../base/typography/base-placeholders.scss | 2 +- .../app/styles/base/typography/index.scss | 2 +- .../consul-ui/app/styles/components.scss | 11 +- ui/packages/consul-ui/app/styles/debug.scss | 3 +- ui/packages/consul-ui/app/styles/icons.scss | 2 +- ui/packages/consul-ui/app/styles/layout.scss | 5 +- .../consul-ui/app/styles/layouts/index.scss | 8 +- .../app/styles/prism-coldark-cold.scss | 2 +- .../app/styles/prism-coldark-dark.scss | 2 +- ui/packages/consul-ui/app/styles/routes.scss | 2 +- .../app/styles/routes/dc/acls/index.scss | 2 +- .../styles/routes/dc/intentions/index.scss | 2 +- .../app/styles/routes/dc/kv/index.scss | 2 +- .../app/styles/routes/dc/nodes/index.scss | 2 +- .../styles/routes/dc/overview/license.scss | 2 +- .../routes/dc/overview/serverstatus.scss | 2 +- .../app/styles/routes/dc/services/index.scss | 5 +- .../consul-ui/app/styles/tailwind.scss | 2 +- ui/packages/consul-ui/app/styles/themes.scss | 16 + .../consul-ui/app/styles/typography.scss | 6 +- .../consul-ui/app/styles/variables.scss | 2 +- .../app/styles/variables/custom-query.scss | 2 +- .../app/styles/variables/layout.scss | 2 +- .../consul-ui/app/styles/variables/skin.scss | 2 +- .../consul-ui/app/templates/application.hbs | 158 +- ui/packages/consul-ui/app/templates/dc.hbs | 2 +- .../consul-ui/app/templates/dc/acls.hbs | 2 +- .../templates/dc/acls/auth-methods/index.hbs | 2 +- .../templates/dc/acls/auth-methods/show.hbs | 2 +- .../dc/acls/auth-methods/show/auth-method.hbs | 2 +- .../acls/auth-methods/show/binding-rules.hbs | 2 +- .../acls/auth-methods/show/nspace-rules.hbs | 2 +- .../consul-ui/app/templates/dc/acls/index.hbs | 2 +- 
.../app/templates/dc/acls/policies/-form.hbs | 2 +- .../app/templates/dc/acls/policies/edit.hbs | 2 +- .../app/templates/dc/acls/policies/index.hbs | 2 +- .../app/templates/dc/acls/roles/-form.hbs | 2 +- .../app/templates/dc/acls/roles/edit.hbs | 2 +- .../app/templates/dc/acls/roles/index.hbs | 2 +- .../dc/acls/tokens/-fieldsets-legacy.hbs | 2 +- .../templates/dc/acls/tokens/-fieldsets.hbs | 2 +- .../app/templates/dc/acls/tokens/-form.hbs | 2 +- .../app/templates/dc/acls/tokens/edit.hbs | 2 +- .../app/templates/dc/acls/tokens/index.hbs | 2 +- .../app/templates/dc/intentions/edit.hbs | 2 +- .../app/templates/dc/intentions/index.hbs | 2 +- .../consul-ui/app/templates/dc/kv/edit.hbs | 2 +- .../consul-ui/app/templates/dc/kv/index.hbs | 2 +- .../app/templates/dc/nodes/index.hbs | 2 +- .../consul-ui/app/templates/dc/nodes/show.hbs | 4 +- .../templates/dc/nodes/show/healthchecks.hbs | 2 +- .../app/templates/dc/nodes/show/index.hbs | 2 +- .../app/templates/dc/nodes/show/metadata.hbs | 2 +- .../app/templates/dc/nodes/show/rtt.hbs | 2 +- .../app/templates/dc/nodes/show/services.hbs | 2 +- .../app/templates/dc/routing-config.hbs | 2 +- .../app/templates/dc/services/index.hbs | 9 +- .../app/templates/dc/services/instance.hbs | 4 +- .../dc/services/instance/addresses.hbs | 2 +- .../dc/services/instance/exposedpaths.hbs | 2 +- .../dc/services/instance/healthchecks.hbs | 2 +- .../dc/services/instance/metadata.hbs | 2 +- .../dc/services/instance/upstreams.hbs | 2 +- .../app/templates/dc/services/show.hbs | 2 +- .../app/templates/dc/services/show/index.hbs | 2 +- .../templates/dc/services/show/instances.hbs | 2 +- .../templates/dc/services/show/intentions.hbs | 2 +- .../dc/services/show/intentions/edit.hbs | 2 +- .../dc/services/show/intentions/index.hbs | 2 +- .../templates/dc/services/show/routing.hbs | 2 +- .../templates/dc/services/show/services.hbs | 2 +- .../app/templates/dc/services/show/tags.hbs | 2 +- .../templates/dc/services/show/topology.hbs | 2 +- .../templates/dc/services/show/upstreams.hbs | 2 +- .../consul-ui/app/templates/dc/show.hbs | 2 +- .../consul-ui/app/templates/dc/show/index.hbs | 2 +- .../app/templates/dc/show/license.hbs | 2 +- .../app/templates/dc/show/serverstatus.hbs | 2 +- ui/packages/consul-ui/app/templates/debug.hbs | 138 +- ui/packages/consul-ui/app/templates/error.hbs | 2 +- ui/packages/consul-ui/app/templates/index.hbs | 2 +- .../consul-ui/app/templates/loading.hbs | 2 +- .../consul-ui/app/templates/notfound.hbs | 2 +- .../app/templates/oauth-provider-debug.hbs | 2 +- .../consul-ui/app/templates/settings.hbs | 2 +- .../consul-ui/app/templates/unavailable.hbs | 106 - ui/packages/consul-ui/app/utils/ascend.js | 2 +- ui/packages/consul-ui/app/utils/atob.js | 2 +- ui/packages/consul-ui/app/utils/btoa.js | 2 +- .../consul-ui/app/utils/callable-type.js | 2 +- .../app/utils/create-fingerprinter.js | 2 +- ui/packages/consul-ui/app/utils/distance.js | 2 +- .../app/utils/dom/click-first-anchor.js | 2 +- .../consul-ui/app/utils/dom/closest.js | 2 +- .../app/utils/dom/create-listeners.js | 2 +- .../app/utils/dom/event-source/blocking.js | 2 +- .../app/utils/dom/event-source/cache.js | 2 +- .../app/utils/dom/event-source/callable.js | 2 +- .../app/utils/dom/event-source/index.js | 2 +- .../app/utils/dom/event-source/openable.js | 2 +- .../app/utils/dom/event-source/proxy.js | 2 +- .../app/utils/dom/event-source/resolver.js | 2 +- .../app/utils/dom/event-source/storage.js | 2 +- .../app/utils/dom/get-component-factory.js | 2 +- .../consul-ui/app/utils/dom/is-outside.js | 2 +- 
.../app/utils/dom/normalize-event.js | 2 +- .../consul-ui/app/utils/dom/qsa-factory.js | 2 +- .../consul-ui/app/utils/dom/sibling.js | 2 +- .../consul-ui/app/utils/editor/lint.js | 2 +- .../consul-ui/app/utils/filter/index.js | 2 +- .../consul-ui/app/utils/form/builder.js | 2 +- .../consul-ui/app/utils/form/changeset.js | 2 +- .../consul-ui/app/utils/get-environment.js | 12 +- .../app/utils/get-form-name-property.js | 2 +- .../app/utils/helpers/call-if-type.js | 2 +- .../consul-ui/app/utils/http/consul.js | 2 +- .../app/utils/http/create-headers.js | 2 +- .../app/utils/http/create-query-params.js | 2 +- .../consul-ui/app/utils/http/create-url.js | 2 +- ui/packages/consul-ui/app/utils/http/error.js | 2 +- .../consul-ui/app/utils/http/headers.js | 2 +- .../consul-ui/app/utils/http/method.js | 2 +- .../consul-ui/app/utils/http/request.js | 2 +- .../consul-ui/app/utils/http/status.js | 2 +- ui/packages/consul-ui/app/utils/http/xhr.js | 2 +- .../app/utils/intl/missing-message.js | 2 +- ui/packages/consul-ui/app/utils/isFolder.js | 2 +- ui/packages/consul-ui/app/utils/keyToArray.js | 2 +- ui/packages/consul-ui/app/utils/left-trim.js | 2 +- ui/packages/consul-ui/app/utils/maybe-call.js | 2 +- .../consul-ui/app/utils/merge-checks.js | 2 +- .../consul-ui/app/utils/minimizeModel.js | 2 +- .../consul-ui/app/utils/non-empty-set.js | 2 +- .../consul-ui/app/utils/path/resolve.js | 2 +- .../consul-ui/app/utils/promisedTimeout.js | 2 +- ui/packages/consul-ui/app/utils/right-trim.js | 2 +- .../app/utils/routing/redirect-to.js | 2 +- .../app/utils/routing/transitionable.js | 2 +- .../consul-ui/app/utils/routing/walk.js | 2 +- .../consul-ui/app/utils/routing/wildcard.js | 2 +- .../consul-ui/app/utils/search/exact.js | 2 +- .../consul-ui/app/utils/search/fuzzy.js | 2 +- .../consul-ui/app/utils/search/predicate.js | 2 +- .../consul-ui/app/utils/search/regexp.js | 2 +- .../app/utils/storage/local-storage.js | 2 +- ui/packages/consul-ui/app/utils/templatize.js | 2 +- .../consul-ui/app/utils/ticker/index.js | 2 +- ui/packages/consul-ui/app/utils/tomography.js | 2 +- ui/packages/consul-ui/app/utils/ucfirst.js | 2 +- .../app/utils/update-array-object.js | 2 +- .../intention-permission-http-header.js | 2 +- .../app/validations/intention-permission.js | 2 +- .../consul-ui/app/validations/intention.js | 2 +- ui/packages/consul-ui/app/validations/kv.js | 2 +- .../consul-ui/app/validations/policy.js | 2 +- ui/packages/consul-ui/app/validations/role.js | 2 +- .../consul-ui/app/validations/sometimes.js | 2 +- .../consul-ui/app/validations/token.js | 2 +- .../blueprints/adapter-test/index.js | 2 +- .../__path__/integration/adapters/__test__.js | 2 +- .../__path__/unit/adapters/__test__.js | 2 +- .../files/__root__/__path__/__name__.js | 2 +- .../consul-ui/blueprints/adapter/index.js | 2 +- .../__templatepath__/__templatename__.hbs | 2 +- .../consul-ui/blueprints/component/index.js | 2 +- .../files/__root__/__path__/__name__.scss | 2 +- .../__root__/__path__/__name__/index.scss | 2 +- .../__root__/__path__/__name__/layout.scss | 2 +- .../__root__/__path__/__name__/skin.scss | 2 +- .../blueprints/css-component/index.js | 2 +- .../consul-ui/blueprints/model-test/index.js | 2 +- .../__root__/__path__/unit/models/__test__.js | 2 +- .../model/files/__root__/__path__/__name__.js | 2 +- .../consul-ui/blueprints/model/index.js | 2 +- .../blueprints/repository-test/index.js | 2 +- .../services/repository/__test__.js | 2 +- .../unit/services/repository/__test__.js | 2 +- .../files/__root__/__path__/__name__.js | 2 +- 
.../consul-ui/blueprints/repository/index.js | 2 +- .../consul-ui/blueprints/route-test/index.js | 2 +- .../consul-ui/blueprints/route/index.js | 2 +- .../__templatepath__/__templatename__.hbs | 2 +- .../blueprints/serializer-test/index.js | 2 +- .../integration/serializers/__test__.js | 2 +- .../__path__/unit/serializers/__test__.js | 2 +- .../files/__root__/__path__/__name__.js | 2 +- .../consul-ui/blueprints/serializer/index.js | 2 +- .../consul-ui/config/deprecation-workflow.js | 2 +- ui/packages/consul-ui/config/ember-intl.js | 2 +- ui/packages/consul-ui/config/environment.js | 5 +- ui/packages/consul-ui/config/targets.js | 2 +- ui/packages/consul-ui/config/utils.js | 2 +- ui/packages/consul-ui/ember-cli-build.js | 3 +- ui/packages/consul-ui/lib/.eslintrc.js | 2 +- .../lib/colocated-components/index.js | 2 +- .../consul-ui/lib/commands/bin/list.js | 2 +- ui/packages/consul-ui/lib/commands/index.js | 2 +- .../consul-ui/lib/commands/lib/list.js | 2 +- .../consul-ui/lib/custom-element/index.js | 2 +- ui/packages/consul-ui/lib/startup/index.js | 2 +- .../lib/startup/templates/body.html.js | 10 +- .../lib/startup/templates/head.html.js | 2 +- .../consul-ui/mock-api/api/hcp/v2/link/global | 21 - .../prefixed-api/api/hcp/v2/link/global | 16 - .../node-tests/config/environment.js | 6 +- .../consul-ui/node-tests/config/utils.js | 2 +- ui/packages/consul-ui/package.json | 21 +- ui/packages/consul-ui/server/index.js | 2 +- ui/packages/consul-ui/tailwind.config.js | 2 +- ui/packages/consul-ui/testem.js | 2 +- .../policies/as-many/add-existing.feature | 1 + .../dc/intentions/filtered-select.feature | 36 + .../tests/acceptance/dc/nodes/show.feature | 19 + .../acceptance/dc/nspaces/manage.feature | 1 + .../acceptance/dc/services/dc-switch.feature | 2 + .../dc/services/show/topology/metrics.feature | 22 - .../tests/acceptance/hcp-login-test.js | 2 +- .../tests/acceptance/link-to-hcp-test.js | 64 - .../consul-ui/tests/acceptance/login.feature | 2 - .../acceptance/steps/api-prefix-steps.js | 2 +- .../steps/components/acl-filter-steps.js | 2 +- .../steps/components/catalog-filter-steps.js | 2 +- .../steps/components/catalog-toolbar-steps.js | 2 +- .../steps/components/copy-button-steps.js | 2 +- .../steps/components/kv-filter-steps.js | 2 +- .../steps/components/text-input-steps.js | 2 +- .../acceptance/steps/dc/acls/access-steps.js | 2 +- .../steps/dc/acls/auth-methods/index-steps.js | 2 +- .../dc/acls/auth-methods/navigation-steps.js | 2 +- .../dc/acls/auth-methods/sorting-steps.js | 2 +- .../acceptance/steps/dc/acls/index-steps.js | 2 +- .../steps/dc/acls/list-order-steps.js | 2 +- .../policies/as-many/add-existing-steps.js | 2 +- .../dc/acls/policies/as-many/add-new-steps.js | 2 +- .../dc/acls/policies/as-many/list-steps.js | 2 +- .../dc/acls/policies/as-many/nspaces-steps.js | 2 +- .../dc/acls/policies/as-many/remove-steps.js | 2 +- .../dc/acls/policies/as-many/reset-steps.js | 2 +- .../steps/dc/acls/policies/create-steps.js | 2 +- .../steps/dc/acls/policies/delete-steps.js | 2 +- .../steps/dc/acls/policies/index-steps.js | 2 +- .../dc/acls/policies/navigation-steps.js | 2 +- .../steps/dc/acls/policies/sorting-steps.js | 2 +- .../steps/dc/acls/policies/update-steps.js | 2 +- .../dc/acls/policies/view-management-steps.js | 2 +- .../dc/acls/policies/view-read-only-steps.js | 2 +- .../acls/roles/as-many/add-existing-steps.js | 2 +- .../dc/acls/roles/as-many/add-new-steps.js | 2 +- .../steps/dc/acls/roles/as-many/list-steps.js | 2 +- .../dc/acls/roles/as-many/remove-steps.js | 2 +- 
.../steps/dc/acls/roles/create-steps.js | 2 +- .../steps/dc/acls/roles/index-steps.js | 2 +- .../steps/dc/acls/roles/navigation-steps.js | 2 +- .../steps/dc/acls/roles/sorting-steps.js | 2 +- .../steps/dc/acls/roles/update-steps.js | 2 +- .../acls/tokens/anonymous-no-delete-steps.js | 2 +- .../steps/dc/acls/tokens/clone-steps.js | 2 +- .../steps/dc/acls/tokens/create-steps.js | 2 +- .../steps/dc/acls/tokens/index-steps.js | 2 +- .../dc/acls/tokens/legacy/update-steps.js | 2 +- .../dc/acls/tokens/login-errors-steps.js | 2 +- .../steps/dc/acls/tokens/login-steps.js | 2 +- .../steps/dc/acls/tokens/navigation-steps.js | 2 +- .../dc/acls/tokens/own-no-delete-steps.js | 2 +- .../steps/dc/acls/tokens/sorting-steps.js | 2 +- .../steps/dc/acls/tokens/update-steps.js | 2 +- .../steps/dc/acls/tokens/use-steps.js | 2 +- .../acceptance/steps/dc/acls/update-steps.js | 2 +- .../acceptance/steps/dc/acls/use-steps.js | 2 +- .../tests/acceptance/steps/dc/error-steps.js | 2 +- .../acceptance/steps/dc/forwarding-steps.js | 2 +- .../tests/acceptance/steps/dc/index-steps.js | 2 +- .../steps/dc/intentions/create-steps.js | 2 +- .../steps/dc/intentions/delete-steps.js | 2 +- .../dc/intentions/filtered-select-steps.js | 2 +- .../steps/dc/intentions/form-select-steps.js | 2 +- .../steps/dc/intentions/index-steps.js | 2 +- .../steps/dc/intentions/navigation-steps.js | 2 +- .../dc/intentions/permissions/create-steps.js | 2 +- .../dc/intentions/permissions/warn-steps.js | 2 +- .../steps/dc/intentions/read-only-steps.js | 2 +- .../steps/dc/intentions/sorting-steps.js | 2 +- .../steps/dc/intentions/update-steps.js | 2 +- .../steps/dc/kv/index/view-kvs-steps.js | 2 +- .../acceptance/steps/dc/kvs/create-steps.js | 2 +- .../acceptance/steps/dc/kvs/delete-steps.js | 2 +- .../acceptance/steps/dc/kvs/edit-steps.js | 2 +- .../acceptance/steps/dc/kvs/index-steps.js | 2 +- .../steps/dc/kvs/list-order-steps.js | 2 +- .../steps/dc/kvs/sessions/invalidate-steps.js | 2 +- .../steps/dc/kvs/trailing-slash-steps.js | 2 +- .../acceptance/steps/dc/kvs/update-steps.js | 2 +- .../steps/dc/list-blocking-steps.js | 2 +- .../tests/acceptance/steps/dc/list-steps.js | 2 +- .../steps/dc/nodes/empty-ids-steps.js | 2 +- .../acceptance/steps/dc/nodes/index-steps.js | 2 +- .../steps/dc/nodes/index/view-nodes-steps.js | 2 +- .../steps/dc/nodes/navigation-steps.js | 2 +- .../steps/dc/nodes/no-leader-steps.js | 2 +- .../steps/dc/nodes/services/list-steps.js | 2 +- .../dc/nodes/sessions/invalidate-steps.js | 2 +- .../steps/dc/nodes/sessions/list-steps.js | 2 +- .../acceptance/steps/dc/nodes/show-steps.js | 2 +- .../dc/nodes/show/health-checks-steps.js | 2 +- .../steps/dc/nodes/sorting-steps.js | 2 +- .../steps/dc/nspaces/create-steps.js | 2 +- .../steps/dc/nspaces/delete-steps.js | 2 +- .../steps/dc/nspaces/index-steps.js | 2 +- .../steps/dc/nspaces/manage-steps.js | 2 +- .../steps/dc/nspaces/sorting-steps.js | 2 +- .../steps/dc/nspaces/update-steps.js | 2 +- .../acceptance/steps/dc/peers/create-steps.js | 2 +- .../acceptance/steps/dc/peers/delete-steps.js | 2 +- .../steps/dc/peers/establish-steps.js | 2 +- .../acceptance/steps/dc/peers/index-steps.js | 2 +- .../steps/dc/peers/regenerate-steps.js | 2 +- .../acceptance/steps/dc/peers/show-steps.js | 2 +- .../steps/dc/routing-config-steps.js | 2 +- .../steps/dc/services/dc-switch-steps.js | 2 +- .../steps/dc/services/error-steps.js | 2 +- .../steps/dc/services/index-steps.js | 2 +- .../dc/services/index/view-services-steps.js | 2 +- .../dc/services/instances/error-steps.js | 2 +- 
.../services/instances/exposed-paths-steps.js | 2 +- .../dc/services/instances/gateway-steps.js | 2 +- .../services/instances/health-checks-steps.js | 2 +- .../dc/services/instances/navigation-steps.js | 2 +- .../dc/services/instances/proxy-steps.js | 2 +- .../steps/dc/services/instances/show-steps.js | 2 +- .../services/instances/sidecar-proxy-steps.js | 2 +- .../dc/services/instances/upstreams-steps.js | 2 +- .../dc/services/instances/with-proxy-steps.js | 2 +- .../services/instances/with-sidecar-steps.js | 2 +- .../steps/dc/services/list-blocking-steps.js | 2 +- .../steps/dc/services/list-steps.js | 2 +- .../steps/dc/services/navigation-steps.js | 2 +- .../steps/dc/services/show-routing-steps.js | 2 +- .../steps/dc/services/show-steps.js | 2 +- .../dc/services/show-with-slashes-steps.js | 2 +- .../steps/dc/services/show/dc-switch-steps.js | 2 +- .../services/show/intentions-error-steps.js | 2 +- .../services/show/intentions/create-steps.js | 2 +- .../services/show/intentions/index-steps.js | 2 +- .../dc/services/show/navigation-steps.js | 2 +- .../steps/dc/services/show/services-steps.js | 2 +- .../steps/dc/services/show/tags-steps.js | 2 +- .../dc/services/show/topology/empty-steps.js | 2 +- .../dc/services/show/topology/index-steps.js | 2 +- .../show/topology/intentions-steps.js | 2 +- .../services/show/topology/metrics-steps.js | 2 +- .../services/show/topology/notices-steps.js | 2 +- .../show/topology/routing-config-steps.js | 2 +- .../dc/services/show/topology/stats-steps.js | 2 +- .../steps/dc/services/show/upstreams-steps.js | 2 +- .../steps/dc/services/sorting-steps.js | 2 +- .../tests/acceptance/steps/deleting-steps.js | 2 +- .../steps/index-forwarding-steps.js | 2 +- .../acceptance/steps/login-errors-steps.js | 2 +- .../tests/acceptance/steps/login-steps.js | 2 +- .../steps/navigation-links-steps.js | 2 +- .../acceptance/steps/nodes/sorting-steps.js | 2 +- .../acceptance/steps/page-navigation-steps.js | 2 +- .../acceptance/steps/settings/show-steps.js | 2 +- .../acceptance/steps/settings/update-steps.js | 2 +- .../tests/acceptance/steps/startup-steps.js | 2 +- .../consul-ui/tests/acceptance/steps/steps.js | 2 +- .../acceptance/steps/submit-blank-steps.js | 2 +- .../acceptance/steps/token-header-steps.js | 2 +- .../tests/acceptance/token-header.feature | 1 - .../tests/acceptance/unavailable-test.js | 65 - ui/packages/consul-ui/tests/dictionary.js | 2 +- ui/packages/consul-ui/tests/helpers/api.js | 2 +- .../consul-ui/tests/helpers/destroy-app.js | 2 +- .../consul-ui/tests/helpers/flash-message.js | 2 +- .../tests/helpers/get-nspace-runner.js | 2 +- .../consul-ui/tests/helpers/measure.js | 2 +- .../tests/helpers/module-for-acceptance.js | 2 +- .../consul-ui/tests/helpers/normalizers.js | 2 +- ui/packages/consul-ui/tests/helpers/page.js | 2 +- ui/packages/consul-ui/tests/helpers/repo.js | 2 +- .../consul-ui/tests/helpers/set-cookies.js | 2 +- .../consul-ui/tests/helpers/stub-super.js | 2 +- .../consul-ui/tests/helpers/type-to-url.js | 2 +- .../tests/helpers/yadda-annotations.js | 2 +- ui/packages/consul-ui/tests/index.html | 2 +- .../integration/adapters/auth-method-test.js | 2 +- .../integration/adapters/binding-rule-test.js | 2 +- .../integration/adapters/coordinate-test.js | 2 +- .../adapters/discovery-chain-test.js | 2 +- .../integration/adapters/intention-test.js | 2 +- .../tests/integration/adapters/kv-test.js | 2 +- .../tests/integration/adapters/node-test.js | 2 +- .../tests/integration/adapters/nspace-test.js | 2 +- .../adapters/oidc-provider-test.js | 2 +- 
.../integration/adapters/partition-test.js | 2 +- .../integration/adapters/permission-test.js | 2 +- .../tests/integration/adapters/policy-test.js | 2 +- .../tests/integration/adapters/role-test.js | 2 +- .../adapters/service-instance-test.js | 2 +- .../integration/adapters/service-test.js | 2 +- .../integration/adapters/session-test.js | 2 +- .../tests/integration/adapters/token-test.js | 2 +- .../integration/adapters/topology-test.js | 2 +- .../integration/components/app-view-test.js | 2 +- .../integration/components/aria-menu-test.js | 2 +- .../components/auth-profile-test.js | 29 + .../components/code-editor-test.js | 2 +- .../components/confirmation-dialog-test.js | 2 +- .../components/consul/bucket/list-test.js | 2 +- .../consul/datacenter/selector-test.js | 18 +- .../components/consul/discovery-chain-test.js | 2 +- .../components/consul/hcp/home-test.js | 7 + .../consul/intention/permission/form-test.js | 2 +- .../intention/permission/header/form-test.js | 2 +- .../consul/node/agentless-notice-test.js | 2 +- .../components/data-collection-test.js | 2 +- .../components/data-source-test.js | 2 +- .../components/delete-confirmation-test.js | 2 +- .../components/event-source-test.js | 2 +- .../components/freetext-filter-test.js | 2 +- .../components/hashicorp-consul-test.js | 2 +- .../components/hcp-nav-item-test.js | 235 - .../integration/components/jwt-source-test.js | 2 +- .../components/link-to-hcp-banner-test.js | 108 - .../components/link-to-hcp-modal-test.js | 230 - .../components/list-collection-test.js | 2 +- .../components/oidc-select-test.js | 2 +- .../components/popover-menu-test.js | 2 +- .../components/radio-group-test.js | 2 +- .../tests/integration/components/ref-test.js | 2 +- .../components/resolver-card-test.js | 2 +- .../integration/components/route-card-test.js | 2 +- .../components/splitter-card-test.js | 2 +- .../integration/components/state-test.js | 2 +- .../integration/components/tab-nav-test.js | 2 +- .../components/tabular-collection-test.js | 2 +- .../components/tabular-details-test.js | 2 +- .../integration/components/tag-list-test.js | 2 +- .../components/toggle-button-test.js | 2 +- .../integration/components/token-list-test.js | 2 +- .../tests/integration/helpers/atob-test.js | 2 +- .../integration/helpers/dom-position-test.js | 2 +- .../integration/helpers/duration-from-test.js | 2 +- .../helpers/format-short-time-test.js | 2 +- .../helpers/hcp-authentication-link-test.js | 60 - .../helpers/hcp-resource-id-to-link-test.js | 41 - .../tests/integration/helpers/is-href-test.js | 2 +- .../tests/integration/helpers/last-test.js | 2 +- .../integration/helpers/left-trim-test.js | 2 +- .../helpers/policy/datacenters-test.js | 2 +- .../integration/helpers/policy/typeof-test.js | 2 +- .../helpers/render-template-test.js | 2 +- .../integration/helpers/right-trim-test.js | 2 +- .../integration/helpers/route-match-test.js | 2 +- .../integration/helpers/searchable-test.js | 2 +- .../helpers/service/card-permissions-test.js | 2 +- .../helpers/service/external-source-test.js | 2 +- .../helpers/service/health-percentage-test.js | 2 +- .../tests/integration/helpers/slugify-test.js | 2 +- .../tests/integration/helpers/split-test.js | 2 +- .../integration/helpers/state-matches-test.js | 2 +- .../tests/integration/helpers/substr-test.js | 2 +- .../integration/helpers/svg-curve-test.js | 2 +- .../helpers/token/is-anonymous-test.js | 2 +- .../helpers/token/is-legacy-test.js | 2 +- .../integration/helpers/tween-to-test.js | 2 +- .../serializers/auth-method-test.js | 2 +- 
.../serializers/binding-rule-test.js | 2 +- .../serializers/coordinate-test.js | 2 +- .../serializers/discovery-chain-test.js | 2 +- .../integration/serializers/intention-test.js | 2 +- .../tests/integration/serializers/kv-test.js | 2 +- .../integration/serializers/node-test.js | 2 +- .../integration/serializers/nspace-test.js | 2 +- .../serializers/oidc-provider-test.js | 2 +- .../integration/serializers/partition-test.js | 2 +- .../integration/serializers/policy-test.js | 2 +- .../integration/serializers/role-test.js | 2 +- .../serializers/service-instance-test.js | 2 +- .../integration/serializers/service-test.js | 2 +- .../integration/serializers/session-test.js | 2 +- .../integration/serializers/token-test.js | 2 +- .../integration/serializers/topology-test.js | 2 +- .../services/repository/auth-method-test.js | 2 +- .../services/repository/coordinate-test.js | 2 +- .../services/repository/dc-test.js | 2 +- .../repository/discovery-chain-test.js | 2 +- .../services/repository/kv-test.js | 2 +- .../services/repository/node-test.js | 2 +- .../services/repository/policy-test.js | 2 +- .../services/repository/role-test.js | 2 +- .../services/repository/service-test.js | 2 +- .../services/repository/session-test.js | 2 +- .../services/repository/token-test.js | 2 +- .../services/repository/topology-test.js | 2 +- .../integration/services/routlet-test.js | 2 +- .../utils/dom/event-source/callable-test.js | 4 +- .../consul-ui/tests/lib/measure/getMeasure.js | 2 +- .../tests/lib/page-object/createCancelable.js | 2 +- .../tests/lib/page-object/createCreatable.js | 2 +- .../tests/lib/page-object/createDeletable.js | 2 +- .../tests/lib/page-object/createSubmitable.js | 2 +- .../consul-ui/tests/lib/page-object/index.js | 2 +- .../tests/lib/page-object/visitable.js | 2 +- ui/packages/consul-ui/tests/pages.js | 2 +- ui/packages/consul-ui/tests/pages/dc.js | 2 +- .../tests/pages/dc/acls/auth-methods/index.js | 2 +- .../consul-ui/tests/pages/dc/acls/edit.js | 2 +- .../consul-ui/tests/pages/dc/acls/index.js | 2 +- .../tests/pages/dc/acls/policies/edit.js | 2 +- .../tests/pages/dc/acls/policies/index.js | 2 +- .../tests/pages/dc/acls/roles/edit.js | 2 +- .../tests/pages/dc/acls/roles/index.js | 2 +- .../tests/pages/dc/acls/tokens/edit.js | 2 +- .../tests/pages/dc/acls/tokens/index.js | 2 +- .../tests/pages/dc/intentions/edit.js | 2 +- .../tests/pages/dc/intentions/index.js | 2 +- .../consul-ui/tests/pages/dc/kv/edit.js | 2 +- .../consul-ui/tests/pages/dc/kv/index.js | 2 +- .../consul-ui/tests/pages/dc/nodes/index.js | 2 +- .../consul-ui/tests/pages/dc/nodes/show.js | 2 +- .../consul-ui/tests/pages/dc/nspaces/edit.js | 2 +- .../consul-ui/tests/pages/dc/nspaces/index.js | 2 +- .../consul-ui/tests/pages/dc/peers/index.js | 2 +- .../consul-ui/tests/pages/dc/peers/show.js | 2 +- .../tests/pages/dc/routing-config.js | 2 +- .../tests/pages/dc/services/index.js | 2 +- .../tests/pages/dc/services/instance.js | 2 +- .../consul-ui/tests/pages/dc/services/show.js | 2 +- ui/packages/consul-ui/tests/pages/index.js | 2 +- ui/packages/consul-ui/tests/pages/settings.js | 2 +- ui/packages/consul-ui/tests/steps.js | 2 +- .../consul-ui/tests/steps/assertions/dom.js | 2 +- .../consul-ui/tests/steps/assertions/form.js | 2 +- .../consul-ui/tests/steps/assertions/http.js | 2 +- .../consul-ui/tests/steps/assertions/model.js | 2 +- .../consul-ui/tests/steps/assertions/page.js | 2 +- .../consul-ui/tests/steps/debug/index.js | 2 +- .../consul-ui/tests/steps/doubles/http.js | 2 +- .../consul-ui/tests/steps/doubles/model.js | 2 +- 
.../tests/steps/interactions/click.js | 2 +- .../tests/steps/interactions/form.js | 2 +- .../tests/steps/interactions/visit.js | 2 +- ui/packages/consul-ui/tests/test-helper.js | 2 +- .../consul-ui/tests/unit/abilities/-test.js | 5 +- .../tests/unit/adapters/application-test.js | 2 +- .../tests/unit/adapters/auth-method-test.js | 2 +- .../tests/unit/adapters/binding-rule-test.js | 2 +- .../tests/unit/adapters/coordinate-test.js | 2 +- .../unit/adapters/discovery-chain-test.js | 2 +- .../tests/unit/adapters/http-test.js | 2 +- .../tests/unit/adapters/intention-test.js | 2 +- .../consul-ui/tests/unit/adapters/kv-test.js | 2 +- .../tests/unit/adapters/node-test.js | 2 +- .../tests/unit/adapters/nspace-test.js | 2 +- .../tests/unit/adapters/oidc-provider-test.js | 2 +- .../tests/unit/adapters/partition-test.js | 2 +- .../tests/unit/adapters/permission-test.js | 2 +- .../tests/unit/adapters/policy-test.js | 2 +- .../tests/unit/adapters/proxy-test.js | 2 +- .../tests/unit/adapters/role-test.js | 2 +- .../unit/adapters/service-instance-test.js | 2 +- .../tests/unit/adapters/session-test.js | 2 +- .../tests/unit/adapters/token-test.js | 2 +- .../get-alternate-services-test.js | 2 +- .../discovery-chain/get-resolvers-test.js | 2 +- .../discovery-chain/get-splitters-test.js | 2 +- .../components/search-bar/filters-test.js | 2 +- .../unit/controllers/application-test.js | 2 +- .../dc/acls/policies/create-test.js | 2 +- .../controllers/dc/acls/policies/edit-test.js | 2 +- .../controllers/dc/acls/roles/create-test.js | 2 +- .../controllers/dc/acls/roles/edit-test.js | 2 +- .../controllers/dc/acls/tokens/create-test.js | 2 +- .../controllers/dc/acls/tokens/edit-test.js | 2 +- .../unit/filter/predicates/intention-test.js | 2 +- .../unit/filter/predicates/service-test.js | 2 +- .../tests/unit/helpers/document-attrs-test.js | 2 +- .../unit/helpers/policy/datacenters-test.js | 2 +- .../unit/helpers/token/is-anonymous-test.js | 2 +- .../unit/helpers/token/is-legacy-test.js | 2 +- .../tests/unit/mixins/policy/as-many-test.js | 2 +- .../tests/unit/mixins/role/as-many-test.js | 2 +- .../unit/mixins/with-blocking-actions-test.js | 2 +- .../tests/unit/models/auth-method-test.js | 2 +- .../tests/unit/models/coordinate-test.js | 2 +- .../consul-ui/tests/unit/models/dc-test.js | 2 +- .../tests/unit/models/discovery-chain-test.js | 2 +- .../tests/unit/models/intention-test.js | 2 +- .../consul-ui/tests/unit/models/kv-test.js | 2 +- .../consul-ui/tests/unit/models/node-test.js | 2 +- .../tests/unit/models/oidc-provider-test.js | 2 +- .../tests/unit/models/partition-test.js | 2 +- .../tests/unit/models/permission-test.js | 2 +- .../tests/unit/models/policy-test.js | 2 +- .../consul-ui/tests/unit/models/proxy-test.js | 2 +- .../consul-ui/tests/unit/models/role-test.js | 2 +- .../unit/models/service-instance-test.js | 2 +- .../tests/unit/models/service-test.js | 2 +- .../tests/unit/models/session-test.js | 2 +- .../consul-ui/tests/unit/models/token-test.js | 2 +- .../tests/unit/routes/application-test.js | 2 +- .../consul-ui/tests/unit/routes/dc-test.js | 2 +- .../routes/dc/acls/policies/create-test.js | 2 +- .../unit/routes/dc/acls/policies/edit-test.js | 2 +- .../routes/dc/acls/policies/index-test.js | 2 +- .../unit/routes/dc/acls/roles/create-test.js | 2 +- .../unit/routes/dc/acls/roles/edit-test.js | 2 +- .../unit/routes/dc/acls/roles/index-test.js | 2 +- .../unit/routes/dc/acls/tokens/create-test.js | 2 +- .../unit/routes/dc/acls/tokens/edit-test.js | 2 +- .../unit/routes/dc/acls/tokens/index-test.js | 2 +- 
.../unit/search/predicates/intention-test.js | 2 +- .../tests/unit/search/predicates/kv-test.js | 2 +- .../tests/unit/search/predicates/node-test.js | 2 +- .../unit/search/predicates/policy-test.js | 2 +- .../tests/unit/search/predicates/role-test.js | 2 +- .../unit/search/predicates/service-test.js | 2 +- .../unit/search/predicates/token-test.js | 2 +- .../unit/serializers/application-test.js | 2 +- .../unit/serializers/auth-method-test.js | 2 +- .../unit/serializers/binding-rule-test.js | 2 +- .../tests/unit/serializers/coordinate-test.js | 2 +- .../unit/serializers/discovery-chain-test.js | 2 +- .../tests/unit/serializers/intention-test.js | 2 +- .../tests/unit/serializers/kv-test.js | 2 +- .../tests/unit/serializers/node-test.js | 2 +- .../tests/unit/serializers/nspace-test.js | 2 +- .../unit/serializers/oidc-provider-test.js | 2 +- .../tests/unit/serializers/partition-test.js | 2 +- .../tests/unit/serializers/permission-test.js | 2 +- .../tests/unit/serializers/policy-test.js | 2 +- .../tests/unit/serializers/proxy-test.js | 2 +- .../tests/unit/serializers/role-test.js | 2 +- .../unit/serializers/service-instance-test.js | 2 +- .../tests/unit/serializers/service-test.js | 2 +- .../tests/unit/serializers/session-test.js | 2 +- .../tests/unit/serializers/token-test.js | 2 +- .../tests/unit/services/atob-test.js | 2 +- .../tests/unit/services/btoa-test.js | 2 +- .../unit/services/client/connections-test.js | 2 +- .../tests/unit/services/client/http-test.js | 2 +- .../services/client/transports/xhr-test.js | 2 +- .../services/clipboard/local-storage-test.js | 2 +- .../tests/unit/services/clipboard/os-test.js | 2 +- .../unit/services/code-mirror/linter-test.js | 2 +- .../data-source/protocols/http-test.js | 2 +- .../protocols/local-storage-test.js | 2 +- .../tests/unit/services/data-structs-test.js | 2 +- .../consul-ui/tests/unit/services/dom-test.js | 2 +- .../tests/unit/services/encoder-test.js | 2 +- .../consul-ui/tests/unit/services/env-test.js | 2 +- .../tests/unit/services/feedback-test.js | 2 +- .../tests/unit/services/form-test.js | 2 +- .../tests/unit/services/logger-test.js | 2 +- .../tests/unit/services/repository-test.js | 2 +- .../services/repository/auth-method-test.js | 2 +- .../services/repository/coordinate-test.js | 2 +- .../tests/unit/services/repository/dc-test.js | 2 +- .../repository/discovery-chain-test.js | 2 +- .../services/repository/intention-test.js | 2 +- .../tests/unit/services/repository/kv-test.js | 2 +- .../unit/services/repository/node-test.js | 2 +- .../unit/services/repository/nspace-test.js | 2 +- .../services/repository/oidc-provider-test.js | 2 +- .../services/repository/partition-test.js | 2 +- .../services/repository/permission-test.js | 2 +- .../unit/services/repository/policy-test.js | 2 +- .../unit/services/repository/role-test.js | 2 +- .../repository/service-instance-test.js | 2 +- .../unit/services/repository/service-test.js | 2 +- .../unit/services/repository/session-test.js | 2 +- .../unit/services/repository/token-test.js | 2 +- .../tests/unit/services/search-test.js | 2 +- .../tests/unit/services/settings-test.js | 2 +- .../tests/unit/services/sort-test.js | 2 +- .../tests/unit/services/state-test.js | 2 +- .../tests/unit/services/store-test.js | 2 +- .../tests/unit/services/temporal-test.js | 2 +- .../tests/unit/services/ticker-test.js | 2 +- .../tests/unit/services/timeout-test.js | 2 +- .../tests/unit/sort/comparators/node-test.js | 2 +- .../unit/sort/comparators/service-test.js | 2 +- .../consul-ui/tests/unit/utils/ascend-test.js | 2 
+- .../consul-ui/tests/unit/utils/atob-test.js | 2 +- .../consul-ui/tests/unit/utils/btoa-test.js | 2 +- .../tests/unit/utils/callable-type-test.js | 2 +- .../unit/utils/create-fingerprinter-test.js | 2 +- .../unit/utils/dom/click-first-anchor-test.js | 2 +- .../tests/unit/utils/dom/closest-test.js | 2 +- .../unit/utils/dom/create-listeners-test.js | 2 +- .../utils/dom/event-source/blocking-test.js | 2 +- .../unit/utils/dom/event-source/cache-test.js | 2 +- .../utils/dom/event-source/callable-test.js | 2 +- .../unit/utils/dom/event-source/index-test.js | 2 +- .../utils/dom/event-source/openable-test.js | 2 +- .../unit/utils/dom/event-source/proxy-test.js | 2 +- .../utils/dom/event-source/resolver-test.js | 2 +- .../utils/dom/event-source/storage-test.js | 2 +- .../unit/utils/dom/event-target/rsvp-test.js | 2 +- .../utils/dom/get-component-factory-test.js | 2 +- .../tests/unit/utils/dom/is-outside-test.js | 2 +- .../unit/utils/dom/normalize-event-test.js | 2 +- .../tests/unit/utils/dom/qsa-factory-test.js | 2 +- .../tests/unit/utils/dom/sibling-test.js | 2 +- .../tests/unit/utils/get-environment-test.js | 2 +- .../unit/utils/get-form-name-property-test.js | 2 +- .../unit/utils/helpers/call-if-type-test.js | 2 +- .../unit/utils/http/create-headers-test.js | 2 +- .../utils/http/create-query-params-test.js | 2 +- .../tests/unit/utils/http/create-url-test.js | 2 +- .../tests/unit/utils/http/error-test.js | 2 +- .../tests/unit/utils/http/request-test.js | 2 +- .../tests/unit/utils/http/xhr-test.js | 2 +- .../tests/unit/utils/isFolder-test.js | 2 +- .../tests/unit/utils/keyToArray-test.js | 2 +- .../tests/unit/utils/left-trim-test.js | 2 +- .../tests/unit/utils/maybe-call-test.js | 2 +- .../tests/unit/utils/merge-checks-test.js | 2 +- .../tests/unit/utils/non-empty-set-test.js | 2 +- .../tests/unit/utils/path/resolve-test.js | 2 +- .../tests/unit/utils/promisedTimeout-test.js | 2 +- .../tests/unit/utils/right-trim-test.js | 2 +- .../unit/utils/routing/transitionable-test.js | 2 +- .../tests/unit/utils/routing/walk-test.js | 2 +- .../tests/unit/utils/routing/wildcard-test.js | 2 +- .../unit/utils/storage/local-storage-test.js | 2 +- .../tests/unit/utils/templatize-test.js | 2 +- .../tests/unit/utils/ticker/index-test.js | 2 +- .../tests/unit/utils/ucfirst-test.js | 2 +- .../unit/utils/update-array-object-test.js | 2 +- .../consul-ui/translations/common/en-us.yaml | 2 +- .../translations/components/app/en-us.yaml | 2 +- .../translations/components/consul/en-us.yaml | 4 +- .../en-us.yaml | 2 +- .../components/hashicorp-consul/en-us.yaml | 47 - .../components/link-to-hcp-banner/en-us.yaml | 10 - .../consul-ui/translations/models/en-us.yaml | 2 +- .../consul-ui/translations/routes/en-us.yaml | 2 +- .../vendor/consul-ui/routes-debug.js | 2 +- .../consul-ui/vendor/consul-ui/routes.js | 5 +- .../vendor/consul-ui/services-debug.js | 2 +- .../consul-ui/vendor/consul-ui/services.js | 2 +- ui/packages/consul-ui/vendor/init.js | 2 +- .../vendor/metrics-providers/consul.js | 2 +- .../vendor/metrics-providers/prometheus.js | 2 +- ui/yarn.lock | 4411 ++++---- version/VERSION | 2 +- version/version.go | 2 +- version/version_test.go | 2 +- version/versiontest/versiontest.go | 13 - website/.eslintrc.js | 2 +- website/.husky/pre-commit | 3 - website/.nvmrc | 2 +- website/.stylelintrc.js | 2 +- website/README.md | 2 +- website/content/api-docs/acl/auth-methods.mdx | 8 +- .../content/api-docs/acl/binding-rules.mdx | 99 +- website/content/api-docs/acl/index.mdx | 10 +- website/content/api-docs/acl/policies.mdx | 2 +- 
website/content/api-docs/acl/roles.mdx | 81 +- .../api-docs/acl/templated-policies.mdx | 189 - website/content/api-docs/acl/tokens.mdx | 33 +- website/content/api-docs/agent/check.mdx | 2 +- website/content/api-docs/agent/connect.mdx | 2 +- website/content/api-docs/agent/service.mdx | 2 +- website/content/api-docs/api-structure.mdx | 9 +- website/content/api-docs/catalog.mdx | 4 +- website/content/api-docs/config.mdx | 2 +- .../content/api-docs/connect/intentions.mdx | 2 +- website/content/api-docs/discovery-chain.mdx | 8 +- .../content/api-docs/exported-services.mdx | 143 - website/content/api-docs/hcp-link.mdx | 190 - website/content/api-docs/health.mdx | 2 +- website/content/api-docs/index.mdx | 1 - website/content/api-docs/kv.mdx | 2 +- website/content/api-docs/session.mdx | 2 +- website/content/api-docs/status.mdx | 2 +- .../commands/acl/binding-rule/create.mdx | 34 +- .../commands/acl/binding-rule/update.mdx | 2 +- .../content/commands/acl/set-agent-token.mdx | 28 +- .../commands/acl/templated-policy/index.mdx | 72 - .../commands/acl/templated-policy/list.mdx | 43 - .../commands/acl/templated-policy/preview.mdx | 130 - .../commands/acl/templated-policy/read.mdx | 46 - website/content/commands/connect/envoy.mdx | 2 +- website/content/commands/debug.mdx | 4 +- .../commands/peering/exported-services.mdx | 50 - website/content/commands/peering/index.mdx | 14 +- .../commands/services/exported-services.mdx | 57 - website/content/commands/snapshot/agent.mdx | 6 +- website/content/commands/watch.mdx | 5 - .../content/docs/agent/config/cli-flags.mdx | 6 +- .../docs/agent/config/config-files.mdx | 34 +- website/content/docs/agent/index.mdx | 3 + .../usage/limit-request-rates-from-ips.mdx | 6 +- .../usage/set-global-traffic-rate-limits.mdx | 2 +- website/content/docs/agent/rpc.mdx | 260 + website/content/docs/agent/telemetry.mdx | 4 +- .../api-gateway/configuration/gateway.mdx | 4 +- .../configuration/gatewayclass.mdx | 2 +- .../configuration/gatewayclassconfig.mdx | 0 .../docs/api-gateway/configuration/index.mdx | 24 + .../api-gateway/configuration/meshservice.mdx | 0 .../api-gateway/configuration/routes.mdx | 343 +- website/content/docs/api-gateway/index.mdx | 45 + website/content/docs/api-gateway/install.mdx | 122 + .../content/docs/api-gateway/tech-specs.mdx | 71 + .../upgrades.mdx} | 8 +- .../usage}/errors.mdx | 2 +- .../usage}/reroute-http-requests.mdx | 10 +- .../usage}/route-to-peered-services.mdx | 18 +- .../content/docs/api-gateway/usage/usage.mdx | 87 + .../content/docs/architecture/catalog/v1.mdx | 37 - .../content/docs/architecture/catalog/v2.mdx | 139 - website/content/docs/connect/ca/index.mdx | 21 +- website/content/docs/connect/ca/vault.mdx | 40 +- .../docs/connect/cluster-peering/index.mdx | 2 +- .../usage/create-sameness-groups.mdx | 115 +- .../usage/establish-cluster-peering.mdx | 4 +- .../usage/peering-traffic-management.mdx | 2 +- .../control-plane-request-limit.mdx | 2 +- .../connect/config-entries/http-route.mdx | 1134 -- .../config-entries/ingress-gateway.mdx | 17 +- .../connect/config-entries/proxy-defaults.mdx | 143 +- .../connect/config-entries/sameness-group.mdx | 70 +- .../config-entries/service-defaults.mdx | 981 +- .../config-entries/service-resolver.mdx | 49 +- .../connect/config-entries/service-router.mdx | 84 +- .../content/docs/connect/dataplane/index.mdx | 70 +- .../{manage-traffic => }/failover/index.mdx | 27 +- .../configuration}/api-gateway.mdx | 235 +- .../configuration/gatewaypolicy.mdx | 259 - .../api-gateway/configuration/http-route.mdx | 684 ++ 
.../api-gateway/configuration/index.mdx | 44 - .../configuration}/inline-certificate.mdx | 2 +- .../configuration/routeauthfilter.mdx | 139 - .../configuration/routeretryfilter.mdx | 206 - .../configuration/routetimeoutfilter.mdx | 114 - .../api-gateway/configuration}/tcp-route.mdx | 2 +- .../api-gateway/define-routes/routes-k8s.mdx | 68 - .../api-gateway/define-routes/routes-vms.mdx | 121 - .../api-gateway/deploy/listeners-k8s.mdx | 74 - .../api-gateway/deploy/listeners-vms.mdx | 113 - .../connect/gateways/api-gateway/index.mdx | 73 +- .../gateways/api-gateway/install-k8s.mdx | 83 - .../secure-traffic/encrypt-vms.mdx | 66 - .../secure-traffic/verify-jwts-k8s.mdx | 74 - .../secure-traffic/verify-jwts-vms.mdx | 45 - .../gateways/api-gateway/tech-specs.mdx | 157 - .../connect/gateways/api-gateway/usage.mdx | 211 + .../content/docs/connect/gateways/index.mdx | 4 +- .../connect/gateways/terminating-gateway.mdx | 2 +- .../discovery-chain.mdx | 0 .../{manage-traffic => l7-traffic}/index.mdx | 8 +- .../manage-traffic/failover/sameness.mdx | 203 - .../manage-traffic/limit-request-rates.mdx | 144 - .../route-to-local-upstreams.mdx | 361 - .../proxies/deploy-sidecar-services.mdx | 2 +- .../configuration/ext-authz.mdx | 13 - .../configuration/otel-access-logging.mdx | 390 - .../envoy-extensions/configuration/wasm.mdx | 4 +- .../proxies/envoy-extensions/index.mdx | 5 - .../envoy-extensions/usage/ext-authz.mdx | 29 +- .../proxies/envoy-extensions/usage/lua.mdx | 12 +- .../usage/otel-access-logging.mdx | 148 - .../content/docs/connect/proxies/envoy.mdx | 30 +- .../content/docs/connect/proxies/index.mdx | 12 +- .../docs/connect/proxies/integrate.mdx | 4 +- .../proxies/proxy-config-reference.mdx | 4 +- .../consul-vs-other/service-mesh-compare.mdx | 2 +- .../content/docs/dynamic-app-config/kv.mdx | 2 +- website/content/docs/ecs/architecture.mdx | 345 +- .../ecs/{reference => }/compatibility.mdx | 4 +- .../configuration-reference.mdx | 19 +- .../docs/ecs/deploy/bind-addresses.mdx | 47 - .../docs/ecs/deploy/configure-routes.mdx | 79 - website/content/docs/ecs/deploy/manual.mdx | 343 - .../ecs/deploy/migrate-existing-tasks.mdx | 99 - website/content/docs/ecs/deploy/terraform.mdx | 479 - website/content/docs/ecs/enterprise.mdx | 4 +- website/content/docs/ecs/index.mdx | 58 +- .../docs/ecs/manual/acl-controller.mdx | 190 + website/content/docs/ecs/manual/install.mdx | 556 + .../docs/ecs/manual/secure-configuration.mdx | 545 + .../docs/ecs/reference/consul-server-json.mdx | 120 - website/content/docs/ecs/requirements.mdx | 32 + .../content/docs/ecs/task-resource-usage.mdx | 37 + website/content/docs/ecs/tech-specs.mdx | 58 - .../content/docs/ecs/terraform/install.mdx | 452 + .../ecs/terraform/migrate-existing-tasks.mdx | 115 + .../ecs/terraform/secure-configuration.mdx | 177 + .../docs/ecs/upgrade-to-dataplanes.mdx | 68 - .../docs/enterprise/ent-to-ce-downgrades.mdx | 208 - website/content/docs/enterprise/index.mdx | 76 +- website/content/docs/install/ports.mdx | 267 +- website/content/docs/internals/index.mdx | 2 +- .../docs/k8s/annotations-and-labels.mdx | 7 +- website/content/docs/k8s/compatibility.mdx | 13 +- .../usage/create-sameness-groups.mdx | 120 +- .../usage/establish-peering.mdx | 2 +- .../cluster-peering/usage/l7-traffic.mdx | 2 +- .../cluster-peering/usage/manage-peering.mdx | 4 +- website/content/docs/k8s/connect/index.mdx | 16 +- .../enable-transparent-proxy.mdx | 2 - .../multi-cluster/index.mdx | 2 +- .../multi-cluster/vms-and-kubernetes.mdx | 1 + website/content/docs/k8s/helm.mdx | 320 +- 
.../docs/k8s/installation/install-cli.mdx | 20 - .../content/docs/k8s/installation/install.mdx | 41 +- .../docs/k8s/l7-traffic/failover-tproxy.mdx | 2 +- .../l7-traffic/route-to-virtual-services.mdx | 2 +- .../content/docs/k8s/multiport/configure.mdx | 470 - website/content/docs/k8s/multiport/index.mdx | 66 - .../k8s/multiport/reference/grpcroute.mdx | 804 -- .../k8s/multiport/reference/httproute.mdx | 921 -- .../reference/proxyconfiguration.mdx | 691 -- .../multiport/reference/resource-command.mdx | 389 - .../docs/k8s/multiport/reference/tcproute.mdx | 341 - .../reference/trafficpermissions.mdx | 245 - .../docs/k8s/multiport/traffic-split.mdx | 164 - website/content/docs/k8s/upgrade/index.mdx | 134 +- website/content/docs/nia/configuration.mdx | 2 +- website/content/docs/nia/usage/errors-ref.mdx | 2 +- .../consul-api-gateway/v0_1_x.mdx | 2 +- .../consul-api-gateway/v0_2_x.mdx | 2 +- .../consul-api-gateway/v0_3_x.mdx | 2 +- .../consul-api-gateway/v0_4_x.mdx | 6 +- .../consul-api-gateway/v0_5_x.mdx | 4 +- .../docs/release-notes/consul-ecs/v0_5_x.mdx | 4 +- .../docs/release-notes/consul-ecs/v0_7_x.mdx | 69 - .../docs/release-notes/consul-k8s/v1_2_x.mdx | 2 +- .../docs/release-notes/consul-k8s/v1_3_x.mdx | 51 - .../docs/release-notes/consul/v1_16_x.mdx | 4 +- .../docs/release-notes/consul/v1_17_x.mdx | 92 - .../content/docs/security/acl/acl-roles.mdx | 121 +- .../content/docs/security/acl/acl-rules.mdx | 51 +- .../docs/security/acl/auth-methods/index.mdx | 2 +- website/content/docs/security/acl/index.mdx | 24 +- .../acl/tokens/create/create-a-dns-token.mdx | 292 +- .../tokens/create/create-a-service-token.mdx | 52 +- .../docs/security/acl/tokens/index.mdx | 44 +- .../docs/security/security-models/core.mdx | 6 + .../checks-configuration-reference.mdx | 4 +- .../services-configuration-reference.mdx | 101 +- .../services/discovery/dns-configuration.mdx | 2 +- .../services/discovery/dns-static-lookups.mdx | 4 +- .../docs/upgrading/upgrade-specific.mdx | 25 +- website/data/api-docs-nav-data.json | 12 - website/data/commands-nav-data.json | 29 - website/data/docs-nav-data.json | 425 +- website/package-lock.json | 652 +- website/package.json | 18 +- website/prettier.config.js | 2 +- website/public/ie-warning.js | 2 +- ...ul-on-ecs-architecture-dataplanes-dark.png | Bin 72818 -> 0 bytes .../consul-on-ecs-architecture-dataplanes.png | Bin 72503 -> 0 bytes website/redirects.js | 103 +- website/scripts/should-build.sh | 2 +- website/scripts/website-build.sh | 2 +- website/scripts/website-start.sh | 2 +- 10155 files changed, 65112 insertions(+), 391363 deletions(-) delete mode 100644 .changelog/17107.txt delete mode 100644 .changelog/17155.txt delete mode 100644 .changelog/17481.txt delete mode 100644 .changelog/17593.txt delete mode 100644 .changelog/17694.txt delete mode 100644 .changelog/17831.txt delete mode 100644 .changelog/17936.txt delete mode 100644 .changelog/18007.txt delete mode 100644 .changelog/18300.txt create mode 100644 .changelog/18303.txt delete mode 100644 .changelog/18324.txt delete mode 100644 .changelog/18336.txt delete mode 100644 .changelog/18367.txt delete mode 100644 .changelog/18439.txt delete mode 100644 .changelog/18504.txt delete mode 100644 .changelog/18560.txt delete mode 100644 .changelog/18573.txt delete mode 100644 .changelog/18583.txt delete mode 100644 .changelog/18646.txt delete mode 100644 .changelog/18668.txt delete mode 100644 .changelog/18708.txt delete mode 100644 .changelog/18719.txt delete mode 100644 .changelog/18769.txt delete mode 100644 
.changelog/18813.txt delete mode 100644 .changelog/18816.txt delete mode 100644 .changelog/18943.txt delete mode 100644 .changelog/18983.txt delete mode 100644 .changelog/18994.txt delete mode 100644 .changelog/19077.txt create mode 100644 .changelog/19120.txt delete mode 100644 .changelog/19218.txt create mode 100644 .changelog/19273.txt delete mode 100644 .changelog/19306.txt delete mode 100644 .changelog/19311.txt delete mode 100644 .changelog/19314.txt delete mode 100644 .changelog/19342.txt delete mode 100644 .changelog/19389.txt create mode 100644 .changelog/19443.txt delete mode 100644 .changelog/19499.txt delete mode 100644 .changelog/19549.txt delete mode 100644 .changelog/19586.txt delete mode 100644 .changelog/19594.txt delete mode 100644 .changelog/19647.txt delete mode 100644 .changelog/19666.txt delete mode 100644 .changelog/19728.txt delete mode 100644 .changelog/19735.txt delete mode 100644 .changelog/19821.txt delete mode 100644 .changelog/19827.txt delete mode 100644 .changelog/19879.txt delete mode 100644 .changelog/19907.txt delete mode 100644 .changelog/19943.txt delete mode 100644 .changelog/19992.txt delete mode 100644 .changelog/20013.txt delete mode 100644 .changelog/20015.txt delete mode 100644 .changelog/20023.txt delete mode 100644 .changelog/20078.txt delete mode 100644 .changelog/20111.txt delete mode 100644 .changelog/20220.txt delete mode 100644 .changelog/20275.txt delete mode 100644 .changelog/20299.txt delete mode 100644 .changelog/20308.txt delete mode 100644 .changelog/20312.txt delete mode 100644 .changelog/20331.txt delete mode 100644 .changelog/20352.txt delete mode 100644 .changelog/20353.txt delete mode 100644 .changelog/20359.txt delete mode 100644 .changelog/20474.txt delete mode 100644 .changelog/20514.txt delete mode 100644 .changelog/20544.txt create mode 100644 .changelog/20586.txt delete mode 100644 .changelog/20589.txt delete mode 100644 .changelog/20642.txt delete mode 100644 .changelog/20643.txt delete mode 100644 .changelog/20679.txt delete mode 100644 .changelog/_18366.txt delete mode 100644 .changelog/_18422.txt create mode 100644 .changelog/_20721.txt delete mode 100644 .changelog/_6074.txt delete mode 100644 .changelog/_6870.txt delete mode 100755 .github/scripts/get_runner_classes_windows.sh create mode 100755 .github/scripts/notify_slack.sh create mode 100644 .github/workflows/copywrite.hcl rename .github/workflows/{nightly-test-1.16.x.yaml => nightly-test-1.12.x.yaml} (75%) rename .github/workflows/{nightly-test-1.17.x.yaml => nightly-test-1.13.x.yaml} (73%) delete mode 100644 .grpcmocks.yaml delete mode 100644 .pre-commit-config.yaml delete mode 100644 .release/docker/docker-entrypoint-windows.sh delete mode 100644 Dockerfile-windows rename Makefile => GNUmakefile (66%) create mode 100644 NOTICE.md delete mode 100644 agent/cacheshim/cache.go delete mode 100644 agent/connect/uri_workload_identity.go delete mode 100644 agent/connect/uri_workload_identity_ce.go delete mode 100644 agent/connect/uri_workload_identity_test.go create mode 100644 agent/connect_auth.go rename testing/deployer/topology/generate.go => agent/consul/config_cloud.go (51%) delete mode 100644 agent/consul/configentry_backend.go delete mode 100644 agent/consul/configentry_backend_ce.go delete mode 100644 agent/consul/configentry_backend_ce_test.go delete mode 100644 agent/consul/configentry_backend_test.go delete mode 100644 agent/consul/fsm/decode_ce.go delete mode 100644 agent/consul/fsm/decode_downgrade.go delete mode 100644 
agent/consul/gateways/controller_gateways_ce.go delete mode 100644 agent/consul/leader_ce.go delete mode 100644 agent/consul/leader_registrator_v1.go delete mode 100644 agent/consul/leader_registrator_v1_test.go delete mode 100644 agent/consul/leader_registrator_v2.go delete mode 100644 agent/consul/leader_registrator_v2_test.go delete mode 100644 agent/consul/server_grpc.go delete mode 100644 agent/consul/state/config_entry_exported_services_ce_test.go delete mode 100644 agent/consul/tenancy_bridge.go delete mode 100644 agent/consul/tenancy_bridge_ce.go delete mode 100644 agent/consul/testdata/v2-resource-dependencies.md delete mode 100644 agent/consul/type_registry.go delete mode 100644 agent/consul/v2_config_entry_exports_shim.go delete mode 100644 agent/consul/v2_config_entry_exports_shim_test.go delete mode 100644 agent/discovery/discovery.go delete mode 100644 agent/discovery/discovery_test.go delete mode 100644 agent/discovery/mock_CatalogDataFetcher.go delete mode 100644 agent/discovery/query_fetcher_v1.go delete mode 100644 agent/discovery/query_fetcher_v1_ce.go delete mode 100644 agent/discovery/query_fetcher_v1_ce_test.go delete mode 100644 agent/discovery/query_fetcher_v1_test.go delete mode 100644 agent/discovery/query_fetcher_v2.go delete mode 100644 agent/discovery/query_fetcher_v2_test.go rename agent/{structs => dns}/dns.go (54%) delete mode 100644 agent/dns/dns_address.go delete mode 100644 agent/dns/dns_address_test.go rename agent/{structs => dns}/dns_test.go (96%) delete mode 100644 agent/dns/mock_DNSRouter.go delete mode 100644 agent/dns/mock_dnsRecursor.go delete mode 100644 agent/dns/parser.go delete mode 100644 agent/dns/recursor.go delete mode 100644 agent/dns/recursor_test.go delete mode 100644 agent/dns/router.go delete mode 100644 agent/dns/router_query.go delete mode 100644 agent/dns/router_query_test.go delete mode 100644 agent/dns/router_response.go delete mode 100644 agent/dns/router_service_test.go delete mode 100644 agent/dns/router_test.go delete mode 100644 agent/dns/server.go create mode 100644 agent/dns/validation.go create mode 100644 agent/dns/validation_test.go delete mode 100644 agent/dns_catalogv2_test.go delete mode 100644 agent/dns_node_lookup_test.go delete mode 100644 agent/dns_reverse_lookup_test.go delete mode 100644 agent/dns_service_lookup_test.go delete mode 100644 agent/envoyextensions/builtin/otel-access-logging/otel_access_logging.go delete mode 100644 agent/envoyextensions/builtin/otel-access-logging/otel_access_logging_test.go delete mode 100644 agent/envoyextensions/builtin/otel-access-logging/structs.go delete mode 100644 agent/envoyextensions/registered_extensions_ce.go delete mode 100644 agent/grpc-external/services/configentry/server.go delete mode 100644 agent/grpc-external/services/configentry/server_ce_test.go delete mode 100644 agent/grpc-external/services/configentry/server_test.go delete mode 100644 agent/grpc-external/services/dns/server_v2.go delete mode 100644 agent/grpc-external/services/dns/server_v2_test.go delete mode 100644 agent/grpc-external/services/resource/delete_ce.go delete mode 100644 agent/grpc-external/services/resource/mock_TenancyBridge.go delete mode 100644 agent/grpc-external/services/resource/mutate_and_validate.go delete mode 100644 agent/grpc-external/services/resource/mutate_and_validate_test.go delete mode 100644 agent/grpc-external/services/resource/server_ce.go delete mode 100644 agent/grpc-external/services/resource/server_ce_test.go delete mode 100644 
agent/grpc-external/services/resource/testing/builder.go delete mode 100644 agent/grpc-external/services/resource/testing/builder_ce.go delete mode 100644 agent/grpc-external/services/resource/testing/testing_ce.go delete mode 100644 agent/grpc-external/services/resource/write_mav_common_test.go delete mode 100644 agent/grpc-external/testutils/mock_server_transport_stream.go delete mode 100644 agent/hcp/bootstrap/config-loader/loader.go delete mode 100644 agent/hcp/bootstrap/config-loader/loader_test.go delete mode 100644 agent/hcp/bootstrap/constants/constants.go delete mode 100644 agent/hcp/client/errors.go delete mode 100644 agent/hcp/client/http_client.go delete mode 100644 agent/hcp/client/http_client_test.go rename agent/hcp/{config => client}/mock_CloudConfig.go (98%) delete mode 100644 agent/hcp/config/config_test.go delete mode 100644 agent/hcp/link_watch.go delete mode 100644 agent/hcp/link_watch_test.go delete mode 100644 agent/hcp/manager_lifecycle.go delete mode 100644 agent/hcp/manager_lifecycle_test.go delete mode 100644 agent/hcp/mock_Manager.go delete mode 100644 agent/hcp/mock_TelemetryProvider.go delete mode 100644 agent/hcp/scada/scada_test.go rename agent/leafcert/{leafcert_test_helpers.go => signer_test.go} (57%) delete mode 100644 agent/proxycfg-sources/catalog/config_source_oss.go delete mode 100644 agent/proxycfg/api_gateway_ce.go delete mode 100644 agent/proxycfg/config_snapshot_glue.go delete mode 100644 agent/proxycfg/config_snapshot_glue_test.go delete mode 100644 agent/structs/acl_templated_policy.go delete mode 100644 agent/structs/acl_templated_policy_ce.go delete mode 100644 agent/structs/acl_templated_policy_ce_test.go delete mode 100644 agent/structs/acl_templated_policy_test.go delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/api-gateway.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/dns.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/node.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/nomad-client.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/nomad-server.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/service.hcl delete mode 100644 agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl delete mode 100644 agent/structs/acltemplatedpolicy/schemas/api-gateway.json delete mode 100644 agent/structs/acltemplatedpolicy/schemas/node.json delete mode 100644 agent/structs/acltemplatedpolicy/schemas/service.json delete mode 100644 agent/structs/acltemplatedpolicy/schemas/workload-identity.json delete mode 100644 agent/structs/config_entry_apigw_jwt_ce.go delete mode 100644 agent/structs/structs.deepcopy_ce.go delete mode 100644 agent/xds/configfetcher/config_fetcher.go delete mode 100644 agent/xds/gw_per_route_filters_ce.go delete mode 100644 agent/xds/jwt_authn_ce.go delete mode 100644 agent/xds/locality_policy.go delete mode 100644 agent/xds/locality_policy_ce.go create mode 100644 agent/xds/naming.go delete mode 100644 agent/xds/naming/naming.go create mode 100644 agent/xds/net_fallback.go rename agent/xds/{platform => }/net_linux.go (85%) delete mode 100644 agent/xds/platform/net_fallback.go delete mode 100644 agent/xds/proxystateconverter/clusters.go delete mode 100644 agent/xds/proxystateconverter/converter.go delete mode 100644 agent/xds/proxystateconverter/endpoints.go delete mode 100644 agent/xds/proxystateconverter/failover_policy.go delete mode 100644 agent/xds/proxystateconverter/failover_policy_ce.go delete mode 100644 
agent/xds/proxystateconverter/listeners.go delete mode 100644 agent/xds/proxystateconverter/locality_policy.go delete mode 100644 agent/xds/proxystateconverter/locality_policy_ce.go delete mode 100644 agent/xds/proxystateconverter/routes.go rename agent/xds/{response => }/response.go (75%) delete mode 100644 agent/xds/testdata/builtin_extension/clusters/otel-access-logging-http.latest.golden delete mode 100644 agent/xds/testdata/builtin_extension/endpoints/otel-access-logging-http.latest.golden delete mode 100644 agent/xds/testdata/builtin_extension/listeners/otel-access-logging-http.latest.golden delete mode 100644 agent/xds/testdata/builtin_extension/routes/otel-access-logging-http.latest.golden delete mode 100644 agent/xds/testdata/clusters/access-logs-defaults.latest.golden delete mode 100644 agent/xds/testdata/clusters/access-logs-json-file.latest.golden delete mode 100644 agent/xds/testdata/clusters/access-logs-text-stderr-disablelistenerlogs.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-http-listener-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-http-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-tcp-listener-with-tcp-and-http-route.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-tcp-listener-with-tcp-route.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-tcp-listener.latest.golden rename agent/xds/testdata/clusters/{api-gateway-with-http-route-timeoutfilter-one-set.latest.golden => api-gateway-with-http-route-and-inline-certificate.latest.golden} (95%) delete mode 100644 agent/xds/testdata/clusters/api-gateway-with-multiple-hostnames.latest.golden delete mode 100644 agent/xds/testdata/clusters/api-gateway-with-multiple-inline-certificates.latest.golden rename agent/xds/testdata/clusters/{api-gateway-with-http-route.latest.golden => api-gateway-with-tcp-route-and-inline-certificate.envoy-1-21-x.golden} (59%) delete mode 100644 agent/xds/testdata/clusters/api-gateway.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-resolver-with-lb.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-route-to-lb-resolver.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-splitter-overweight.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-upstream-defaults.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-chain-http2.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-default-chain-and-custom-cluster.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-grpc-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-http-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-http2-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-escape-overrides.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams-http2.latest.golden delete mode 100644 
agent/xds/testdata/clusters/connect-proxy-with-tcp-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/clusters/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-passive-healthcheck-zero-consecutive_5xx.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-public-listener-http-2.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-public-listener-http-missing.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-public-listener-http.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-public-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-upstream-ignored-with-disco-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/custom-upstream-with-prepared-query.latest.golden delete mode 100644 agent/xds/testdata/clusters/expose-checks-grpc.latest.golden delete mode 100644 agent/xds/testdata/clusters/expose-checks-http-with-bind-override.latest.golden delete mode 100644 agent/xds/testdata/clusters/expose-checks-http.latest.golden delete mode 100644 agent/xds/testdata/clusters/expose-checks.latest.golden delete mode 100644 agent/xds/testdata/clusters/grpc-public-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/http-listener-with-timeouts.latest.golden delete mode 100644 agent/xds/testdata/clusters/http-public-listener-no-xfcc.latest.golden delete mode 100644 agent/xds/testdata/clusters/http-public-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/http-upstream.latest.golden delete mode 100644 agent/xds/testdata/clusters/http2-public-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-config-entry-nil.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-defaults-no-chain.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-grpc-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-http-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-chain-and-router-header-manip.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-grpc-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-http2-and-grpc-multiple-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-http2-single-tls-listener.latest.golden rename agent/xds/testdata/clusters/{ingress-gateway-bind-addrs.latest.golden => ingress-with-overwrite-defaults-service-passive-health-check.latest.golden} (85%) delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener+service-level.latest.golden delete mode 100644 
agent/xds/testdata/clusters/ingress-with-sds-listener-gw-level-http.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener-gw-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener-gw-level.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener-level-wildcard.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener-level.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-listener-listener-level.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-service-level-2.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-service-level-mixed-no-tls.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-service-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-sds-service-level.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-listener-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-listener-max-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-listener-min-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-mixed-listeners.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-mixed-max-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/clusters/ingress-with-tls-mixed-min-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-balance-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-balance-outbound-connections-bind-port.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-bind-address-port.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-bind-address.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-bind-port.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/clusters/listener-unix-domain-socket.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-custom-addresses.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-newer-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-older-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-service-subsets2.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/clusters/mesh-gateway-using-federation-control-plane.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-custom-and-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-custom-trace-listener.latest.golden delete mode 
100644 agent/xds/testdata/clusters/terminating-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-lb-config-no-hash-policies.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-no-api-cert.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-with-peer-trust-bundle.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/terminating-gateway-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/clusters/transparent-proxy-http-upstream.latest.golden delete mode 100644 agent/xds/testdata/clusters/transparent-proxy-terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/clusters/transparent-proxy-with-resolver-redirect-upstream.latest.golden delete mode 100644 agent/xds/testdata/clusters/xds-fetch-timeout-ms-tproxy-http-peering.latest.golden delete mode 100644 agent/xds/testdata/endpoints/access-logs-defaults.latest.golden delete mode 100644 agent/xds/testdata/endpoints/access-logs-json-file.latest.golden delete mode 100644 agent/xds/testdata/endpoints/access-logs-text-stderr-disablelistenerlogs.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-http-listener-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-http-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-tcp-listener-with-tcp-and-http-route.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-tcp-listener-with-tcp-route.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-tcp-listener.latest.golden rename agent/{xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden => xds/testdata/endpoints/api-gateway-with-http-route-and-inline-certificate.latest.golden} (54%) delete mode 100644 agent/xds/testdata/endpoints/api-gateway-with-http-route-timeoutfilter-one-set.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-with-multiple-hostnames.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway-with-multiple-inline-certificates.latest.golden delete mode 100644 agent/xds/testdata/endpoints/api-gateway.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-lb-in-resolver.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-resolver-with-lb.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-route-to-lb-resolver.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-splitter-overweight.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-upstream-defaults.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-chain-http2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-grpc-chain.latest.golden delete mode 
100644 agent/xds/testdata/endpoints/connect-proxy-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-http-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-http2-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-jwt-config-entry-with-local.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-jwt-config-entry-with-remote-jwks.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-escape-overrides.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-peered-upstreams-http2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tcp-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-outgoing-min-version-auto.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/endpoints/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-limits-max-connections-only.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-limits-set-to-zero.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-limits.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-local-app.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-passive-healthcheck-zero-consecutive_5xx.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-passive-healthcheck.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-public-listener-http-2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-public-listener-http-missing.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-public-listener-http.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-public-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-timeouts.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-upstream-default-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-upstream-ignored-with-disco-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-upstream-with-prepared-query.latest.golden delete mode 100644 agent/xds/testdata/endpoints/custom-upstream.latest.golden delete mode 100644 agent/xds/testdata/endpoints/downstream-service-with-unix-sockets.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-checks-grpc.latest.golden delete mode 100644 
agent/xds/testdata/endpoints/expose-checks-http-with-bind-override.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-checks-http.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-checks.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-paths-grpc-new-cluster-http1.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-paths-local-app-paths.latest.golden delete mode 100644 agent/xds/testdata/endpoints/expose-paths-new-cluster-http2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/grpc-public-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/http-listener-with-timeouts.latest.golden delete mode 100644 agent/xds/testdata/endpoints/http-public-listener-no-xfcc.latest.golden delete mode 100644 agent/xds/testdata/endpoints/http-public-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/http-upstream.latest.golden delete mode 100644 agent/xds/testdata/endpoints/http2-public-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-config-entry-nil.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-defaults-no-chain.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-gateway-bind-addrs.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-gateway-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-gateway-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-gateway-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-grpc-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-http-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-lb-in-resolver.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-chain-and-router-header-manip.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-grpc-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-http2-and-grpc-multiple-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-http2-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-overwrite-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-overwrite-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener+service-level.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener-gw-level-http.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener-gw-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener-gw-level.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener-level-wildcard.latest.golden delete mode 100644 
agent/xds/testdata/endpoints/ingress-with-sds-listener-level.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-listener-listener-level.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-service-level-2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-service-level-mixed-no-tls.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-service-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-sds-service-level.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-service-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-listener-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-listener-max-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-listener-min-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-mixed-listeners.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-mixed-max-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/endpoints/ingress-with-tls-mixed-min-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-balance-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-balance-outbound-connections-bind-port.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-bind-address-port.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-bind-address.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-bind-port.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/endpoints/listener-unix-domain-socket.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-custom-addresses.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-hash-lb-ignored.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-non-hash-lb-injected.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-service-subsets2.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-service-timeouts.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/endpoints/mesh-gateway-using-federation-control-plane.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-custom-and-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-custom-trace-listener.latest.golden delete mode 100644 
agent/xds/testdata/endpoints/terminating-gateway-hostname-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-http2-upstream-subsets.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-http2-upstream.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-lb-config-no-hash-policies.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-lb-config.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-no-api-cert.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-sni.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-with-peer-trust-bundle.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/terminating-gateway-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/endpoints/transparent-proxy-catalog-destinations-only.latest.golden delete mode 100644 agent/xds/testdata/endpoints/transparent-proxy-dial-instances-directly.latest.golden delete mode 100644 agent/xds/testdata/endpoints/transparent-proxy-http-upstream.latest.golden delete mode 100644 agent/xds/testdata/endpoints/transparent-proxy-terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/endpoints/transparent-proxy-with-resolver-redirect-upstream.latest.golden delete mode 100644 agent/xds/testdata/endpoints/xds-fetch-timeout-ms-tproxy-http-peering.latest.golden create mode 100644 agent/xds/testdata/listeners/api-gateway-tcp-listeners.latest.golden rename agent/xds/testdata/listeners/{api-gateway-with-multiple-hostnames.latest.golden => api-gateway-with-http-route-and-inline-certificate.latest.golden} (96%) delete mode 100644 agent/xds/testdata/listeners/api-gateway-with-http-route-timeoutfilter-one-set.latest.golden delete mode 100644 agent/xds/testdata/listeners/api-gateway-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/listeners/api-gateway-with-multiple-inline-certificates.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-lb-in-resolver.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-resolver-with-lb.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-route-to-lb-resolver.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-splitter-overweight.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-chain-http2.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-chain.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-default-chain-and-custom-cluster.latest.golden delete mode 100644 
agent/xds/testdata/listeners/connect-proxy-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-jwt-config-entry-with-remote-jwks.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-escape-overrides.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-peered-upstreams-http2.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/listeners/connect-proxy-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-limits-max-connections-only.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-limits-set-to-zero.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-limits.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-local-app.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-passive-healthcheck-zero-consecutive_5xx.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-passive-healthcheck.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-timeouts.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-upstream-default-chain.latest.golden delete mode 100644 agent/xds/testdata/listeners/custom-upstream-with-prepared-query.latest.golden delete mode 100644 agent/xds/testdata/listeners/downstream-service-with-unix-sockets.latest.golden delete mode 100644 agent/xds/testdata/listeners/expose-checks-grpc.latest.golden delete mode 100644 agent/xds/testdata/listeners/expose-checks-http-with-bind-override.latest.golden delete mode 100644 agent/xds/testdata/listeners/expose-checks-http.latest.golden delete mode 100644 agent/xds/testdata/listeners/expose-paths-grpc-new-cluster-http1.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-config-entry-nil.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-defaults-no-chain.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-gateway-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-gateway-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-gateway-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-lb-in-resolver.latest.golden delete mode 100644 
agent/xds/testdata/listeners/ingress-multiple-listeners-duplicate-service.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain-and-failover-to-cluster-peer.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain-and-router-header-manip.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-chain.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-overwrite-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-overwrite-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-sds-listener-level-wildcard.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-sds-listener-level.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-sds-service-level-2.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-sds-service-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-service-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden delete mode 100644 agent/xds/testdata/listeners/ingress-with-tls-mixed-max-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-hash-lb-ignored.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-newer-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-non-hash-lb-injected.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-older-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-service-subsets.latest.golden delete mode 100644 
agent/xds/testdata/listeners/mesh-gateway-service-subsets2.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-service-timeouts.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/listeners/mesh-gateway-using-federation-control-plane.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-hostname-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-http2-upstream-subsets.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-http2-upstream.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-lb-config-no-hash-policies.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-lb-config.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-sni.latest.golden delete mode 100644 agent/xds/testdata/listeners/terminating-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/listeners/xds-fetch-timeout-ms-tproxy-http-peering.latest.golden delete mode 100644 agent/xds/testdata/rbac/v2-L4-deny-L7-allow--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-L4-deny-L7-allow.golden delete mode 100644 agent/xds/testdata/rbac/v2-default-allow--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-default-allow.golden delete mode 100644 agent/xds/testdata/rbac/v2-default-deny--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-default-deny.golden delete mode 100644 agent/xds/testdata/rbac/v2-ignore-empty-permissions--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-ignore-empty-permissions.golden delete mode 100644 agent/xds/testdata/rbac/v2-kitchen-sink--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-kitchen-sink.golden delete mode 100644 agent/xds/testdata/rbac/v2-path-excludes--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-path-excludes.golden delete mode 100644 agent/xds/testdata/rbac/v2-path-method-header-excludes--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-path-method-header-excludes.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-multiple-destination-rules--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-multiple-destination-rules.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-with-excludes--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-with-excludes.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-with-kitchen-sink-perms--httpfilter.golden delete mode 100644 agent/xds/testdata/rbac/v2-single-permission-with-kitchen-sink-perms.golden delete mode 100644 agent/xds/testdata/routes/access-logs-defaults.latest.golden delete mode 100644 agent/xds/testdata/routes/access-logs-json-file.latest.golden delete mode 100644 agent/xds/testdata/routes/access-logs-text-stderr-disablelistenerlogs.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-http-listener-with-http-route.latest.golden delete mode 100644 
agent/xds/testdata/routes/api-gateway-http-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-tcp-listener-with-tcp-and-http-route.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-tcp-listener-with-tcp-route.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-tcp-listener.latest.golden rename agent/xds/testdata/routes/{api-gateway-with-http-route-timeoutfilter-one-set.latest.golden => api-gateway-with-http-route-and-inline-certificate.latest.golden} (77%) delete mode 100644 agent/xds/testdata/routes/api-gateway-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway-with-multiple-inline-certificates.latest.golden delete mode 100644 agent/xds/testdata/routes/api-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-resolver-with-lb.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-route-to-lb-resolver.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-splitter-overweight.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-upstream-defaults.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-chain-http2.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-default-chain-and-custom-cluster.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-grpc-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-http-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-http2-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-jwt-config-entry-with-local.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-jwt-config-entry-with-remote-jwks.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-escape-overrides.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-peered-upstreams-http2.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tcp-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 
agent/xds/testdata/routes/connect-proxy-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-outgoing-min-version-auto.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/routes/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-limits-max-connections-only.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-limits-set-to-zero.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-limits.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-local-app.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-passive-healthcheck-zero-consecutive_5xx.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-passive-healthcheck.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-public-listener-http-2.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-public-listener-http-missing.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-public-listener-http.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-public-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-timeouts.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-upstream-default-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-upstream-ignored-with-disco-chain.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-upstream-with-prepared-query.latest.golden delete mode 100644 agent/xds/testdata/routes/custom-upstream.latest.golden delete mode 100644 agent/xds/testdata/routes/downstream-service-with-unix-sockets.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-checks-grpc.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-checks-http-with-bind-override.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-checks-http.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-checks.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-paths-grpc-new-cluster-http1.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-paths-local-app-paths.latest.golden delete mode 100644 agent/xds/testdata/routes/expose-paths-new-cluster-http2.latest.golden delete mode 100644 agent/xds/testdata/routes/grpc-public-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/http-listener-with-timeouts.latest.golden delete mode 100644 agent/xds/testdata/routes/http-public-listener-no-xfcc.latest.golden delete mode 100644 agent/xds/testdata/routes/http-public-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/http-upstream.latest.golden delete mode 100644 agent/xds/testdata/routes/http2-public-listener.latest.golden delete mode 100644 
agent/xds/testdata/routes/ingress-gateway-bind-addrs.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway-no-services.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-multiple-listeners-duplicate-service.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-chain-and-failover-to-cluster-peer.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-grpc-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-http2-and-grpc-multiple-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-http2-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-overwrite-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-overwrite-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-listener+service-level.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-listener-gw-level-http.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-listener-gw-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-listener-gw-level.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-listener-listener-level.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-service-level-2.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-sds-service-level-mixed-no-tls.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-service-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 
agent/xds/testdata/routes/ingress-with-tcp-chain-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-listener-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-listener-max-version.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-listener-min-version.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-mixed-listeners.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-mixed-max-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/routes/ingress-with-tls-mixed-min-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-balance-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-balance-outbound-connections-bind-port.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-bind-address-port.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-bind-address.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-bind-port.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/routes/listener-unix-domain-socket.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-custom-addresses.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-hash-lb-ignored.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-newer-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-no-services.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-non-hash-lb-injected.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-older-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-service-subsets2.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-service-timeouts.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-using-federation-control-plane.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway-using-federation-states.latest.golden delete mode 100644 agent/xds/testdata/routes/mesh-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-custom-and-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-hostname-service-subsets.latest.golden delete mode 100644 
agent/xds/testdata/routes/terminating-gateway-http2-upstream-subsets.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-http2-upstream.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-lb-config-no-hash-policies.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-no-api-cert.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-no-services.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-sni.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-with-peer-trust-bundle.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/routes/terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/transparent-proxy-catalog-destinations-only.latest.golden delete mode 100644 agent/xds/testdata/routes/transparent-proxy-dial-instances-directly.latest.golden delete mode 100644 agent/xds/testdata/routes/transparent-proxy-http-upstream.latest.golden delete mode 100644 agent/xds/testdata/routes/transparent-proxy-terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/routes/transparent-proxy-with-resolver-redirect-upstream.latest.golden delete mode 100644 agent/xds/testdata/routes/xds-fetch-timeout-ms-tproxy-http-peering.latest.golden delete mode 100644 agent/xds/testdata/secrets/access-logs-defaults.latest.golden delete mode 100644 agent/xds/testdata/secrets/access-logs-json-file.latest.golden delete mode 100644 agent/xds/testdata/secrets/access-logs-text-stderr-disablelistenerlogs.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-http-listener-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-http-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-tcp-listener-with-tcp-and-http-route.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-tcp-listener-with-tcp-route.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-tcp-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-with-http-route-timeoutfilter-one-set.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-with-http-route.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-with-multiple-hostnames.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway-with-multiple-inline-certificates.latest.golden delete mode 100644 agent/xds/testdata/secrets/api-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-lb-in-resolver.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-resolver-with-lb.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-route-to-lb-resolver.latest.golden delete mode 100644 
agent/xds/testdata/secrets/connect-proxy-splitter-overweight.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-upstream-defaults.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-and-overrides.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-external-sni.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain-http2.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-default-chain-and-custom-cluster.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-grpc-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-http-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-http2-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-jwt-config-entry-with-local.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-jwt-config-entry-with-remote-jwks.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-escape-overrides.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-http2.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-peered-upstreams-listener-override.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tcp-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-outgoing-max-version.latest.golden delete mode 100644 
agent/xds/testdata/secrets/connect-proxy-with-tls-outgoing-min-version-auto.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-with-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/secrets/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-limits-max-connections-only.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-limits-set-to-zero.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-limits.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-local-app.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-passive-healthcheck-zero-consecutive_5xx.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-passive-healthcheck.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-public-listener-http-2.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-public-listener-http-missing.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-public-listener-http.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-public-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-timeouts.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-upstream-default-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-upstream-ignored-with-disco-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-upstream-with-prepared-query.latest.golden delete mode 100644 agent/xds/testdata/secrets/custom-upstream.latest.golden delete mode 100644 agent/xds/testdata/secrets/downstream-service-with-unix-sockets.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-checks-grpc.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-checks-http-with-bind-override.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-checks-http.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-checks.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-paths-grpc-new-cluster-http1.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-paths-local-app-paths.latest.golden delete mode 100644 agent/xds/testdata/secrets/expose-paths-new-cluster-http2.latest.golden delete mode 100644 agent/xds/testdata/secrets/grpc-public-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/http-listener-with-timeouts.latest.golden delete mode 100644 agent/xds/testdata/secrets/http-public-listener-no-xfcc.latest.golden delete mode 100644 agent/xds/testdata/secrets/http-public-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/http-upstream.latest.golden delete mode 100644 agent/xds/testdata/secrets/http2-public-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-config-entry-nil.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-defaults-no-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway-bind-addrs.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway-nil-config-entry.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway-no-services.latest.golden delete mode 
100644 agent/xds/testdata/secrets/ingress-gateway-with-tls-outgoing-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway-with-tls-outgoing-max-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway-with-tls-outgoing-min-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-grpc-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-http-multiple-services.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-lb-in-resolver.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-multiple-listeners-duplicate-service.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-splitter-with-resolver-redirect.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-and-failover-to-cluster-peer.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-and-failover.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-and-router-header-manip.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-and-router.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-and-splitter.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain-external-sni.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-chain.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-grpc-router.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-grpc-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-http2-and-grpc-multiple-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-http2-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-overwrite-defaults-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-overwrite-defaults-service-max-connections.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener+service-level.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-gw-level-http.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-gw-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-gw-level.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-level-wildcard.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-level.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-listener-listener-level.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-service-level-2.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-service-level-mixed-no-tls.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-service-level-mixed-tls.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-sds-service-level.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-service-max-connections.latest.golden delete mode 100644 
agent/xds/testdata/secrets/ingress-with-service-passive-health-check.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-single-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-double-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-double-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-double-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-double-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-failover-through-local-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-failover-through-local-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-failover-through-remote-gateway-triggered.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tcp-chain-failover-through-remote-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-listener-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-listener-max-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-listener-min-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-mixed-cipher-suites-listeners.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-mixed-listeners.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-mixed-max-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/secrets/ingress-with-tls-mixed-min-version-listeners.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-balance-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-balance-outbound-connections-bind-port.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-bind-address-port.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-bind-address.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-bind-port.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-max-inbound-connections.latest.golden delete mode 100644 agent/xds/testdata/secrets/listener-unix-domain-socket.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-custom-addresses.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-hash-lb-ignored.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-newer-information-in-federation-states.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-no-services.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-non-hash-lb-injected.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-older-information-in-federation-states.latest.golden delete mode 100644 
agent/xds/testdata/secrets/mesh-gateway-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-service-subsets2.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-service-timeouts.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-using-federation-control-plane.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway-using-federation-states.latest.golden delete mode 100644 agent/xds/testdata/secrets/mesh-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/splitter-with-resolver-redirect.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-custom-and-tagged-addresses.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-custom-trace-listener.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-default-service-subset.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-hostname-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-http2-upstream-subsets.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-http2-upstream.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-ignore-extra-resolvers.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-lb-config-no-hash-policies.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-lb-config.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-no-api-cert.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-no-services.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-service-subsets.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-sni.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-tcp-keepalives.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-with-peer-trust-bundle.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-with-tls-incoming-max-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway-with-tls-incoming-min-version.latest.golden delete mode 100644 agent/xds/testdata/secrets/terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/transparent-proxy-catalog-destinations-only.latest.golden delete mode 100644 agent/xds/testdata/secrets/transparent-proxy-dial-instances-directly.latest.golden delete mode 100644 agent/xds/testdata/secrets/transparent-proxy-http-upstream.latest.golden delete mode 100644 agent/xds/testdata/secrets/transparent-proxy-terminating-gateway.latest.golden delete mode 100644 agent/xds/testdata/secrets/transparent-proxy-with-resolver-redirect-upstream.latest.golden delete mode 100644 agent/xds/testdata/secrets/xds-fetch-timeout-ms-tproxy-http-peering.latest.golden delete mode 100644 agent/xdsv2/cluster_resources.go delete mode 100644 agent/xdsv2/endpoint_resources.go delete mode 100644 agent/xdsv2/listener_resources.go delete mode 100644 agent/xdsv2/rbac_resources.go delete mode 100644 agent/xdsv2/resources.go delete mode 
100644 agent/xdsv2/resources_test.go delete mode 100644 agent/xdsv2/route_resources.go delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 
agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 
agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-default-default.golden delete mode 100644 
agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 
100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 
agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 
agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 
agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 
agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 
agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-default-default.golden delete mode 100644 
agent/xdsv2/testdata/routes/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden 
delete mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-default-default.golden delete mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 api/LICENSE delete mode 100644 api/config_entry_routes_test.go delete mode 100644 api/exported_services.go delete mode 100755 build-support/scripts/check-allowed-imports.sh delete mode 100644 build-support/windows/Dockerfile-consul-dev-windows delete mode 100644 build-support/windows/Dockerfile-consul-local-windows delete mode 100644 build-support/windows/Dockerfile-openzipkin-windows delete mode 100644 build-support/windows/build-consul-dev-image.sh delete mode 100644 build-support/windows/build-consul-local-images.sh delete mode 100644 build-support/windows/build-test-sds-server-image.sh delete mode 100644 build-support/windows/windows-test.md delete mode 100644 command/acl/templatedpolicy/formatter.go delete mode 100644 command/acl/templatedpolicy/formatter_ce_test.go delete mode 100644 command/acl/templatedpolicy/formatter_test.go delete mode 100644 command/acl/templatedpolicy/list/templated_policy_list.go delete mode 100644 command/acl/templatedpolicy/list/templated_policy_list_test.go delete mode 100644 command/acl/templatedpolicy/preview/templated_policy_preview.go delete mode 100644 command/acl/templatedpolicy/preview/templated_policy_preview_test.go delete mode 100644 command/acl/templatedpolicy/read/templated_policy_read.go delete mode 100644 command/acl/templatedpolicy/read/templated_policy_read_test.go delete mode 100644 command/acl/templatedpolicy/templated_policy.go delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/dns-templated-policy.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/dns-templated-policy.pretty-meta.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/dns-templated-policy.pretty.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/node-templated-policy.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/node-templated-policy.pretty-meta.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/node-templated-policy.pretty.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-client-templated-policy.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-client-templated-policy.pretty-meta.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-client-templated-policy.pretty.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-server-templated-policy.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-server-templated-policy.pretty-meta.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/nomad-server-templated-policy.pretty.golden delete mode 100644 
command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/service-templated-policy.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/service-templated-policy.pretty-meta.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicy/ce/service-templated-policy.pretty.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicyList/ce/list.json.golden delete mode 100644 command/acl/templatedpolicy/testdata/FormatTemplatedPolicyList/ce/list.pretty.golden delete mode 100644 command/connect/envoy/exec_supported.go delete mode 100644 command/connect/envoy/exec_windows.go delete mode 100644 command/peering/exportedservices/exported_services.go delete mode 100644 command/peering/exportedservices/exported_services_test.go delete mode 100644 command/resource/apply/apply.go delete mode 100644 command/resource/apply/apply_test.go delete mode 100644 command/resource/client/client.go delete mode 100644 command/resource/client/client_test.go delete mode 100644 command/resource/client/config.go delete mode 100644 command/resource/client/config_test.go delete mode 100644 command/resource/client/grpc-flags.go delete mode 100644 command/resource/client/grpc-flags_test.go delete mode 100644 command/resource/client/helper.go delete mode 100644 command/resource/client/helper_test.go delete mode 100644 command/resource/client/resource-flags.go delete mode 100644 command/resource/client/usage.go delete mode 100644 command/resource/delete/delete.go delete mode 100644 command/resource/delete/delete_test.go delete mode 100644 command/resource/list/list.go delete mode 100644 command/resource/list/list_test.go delete mode 100644 command/resource/read/read.go delete mode 100644 command/resource/read/read_test.go delete mode 100644 command/resource/resource.go delete mode 100644 command/resource/testdata/demo.hcl delete mode 100644 command/resource/testdata/invalid.hcl delete mode 100644 command/resource/testdata/invalid_type.hcl delete mode 100644 command/resource/testdata/nested_data.hcl delete mode 100644 command/services/exportedservices/exported_services.go delete mode 100644 command/services/exportedservices/exported_services_test.go rename docs/{v2-architecture/controller-architecture => resources}/README.md (77%) rename docs/{v2-architecture/controller-architecture => resources}/architecture-overview.png (100%) rename docs/{v2-architecture/controller-architecture => resources}/guide.md (62%) rename docs/{v2-architecture/controller-architecture => resources}/raft-backend.png (100%) delete mode 100644 docs/v2-architecture/controller-architecture/controllers.md delete mode 100644 docs/v2-architecture/controller-architecture/testing.md delete mode 100644 docs/v2-architecture/service-mesh/README.md delete mode 100644 docs/v2-architecture/service-mesh/controllers.png create mode 100644 fixup_acl_move.sh delete mode 100644 grafana/consul-k8s-control-plane-monitoring.json delete mode 100644 grpcmocks/proto-public/pbacl/mock_ACLServiceClient.go delete mode 100644 grpcmocks/proto-public/pbacl/mock_ACLServiceServer.go delete mode 100644 grpcmocks/proto-public/pbacl/mock_UnsafeACLServiceServer.go delete mode 100644 grpcmocks/proto-public/pbconnectca/mock_ConnectCAServiceClient.go delete mode 100644 grpcmocks/proto-public/pbconnectca/mock_ConnectCAServiceServer.go delete mode 100644 grpcmocks/proto-public/pbconnectca/mock_ConnectCAService_WatchRootsClient.go delete mode 100644 
grpcmocks/proto-public/pbconnectca/mock_ConnectCAService_WatchRootsServer.go delete mode 100644 grpcmocks/proto-public/pbconnectca/mock_UnsafeConnectCAServiceServer.go delete mode 100644 grpcmocks/proto-public/pbdataplane/mock_DataplaneServiceClient.go delete mode 100644 grpcmocks/proto-public/pbdataplane/mock_DataplaneServiceServer.go delete mode 100644 grpcmocks/proto-public/pbdataplane/mock_UnsafeDataplaneServiceServer.go delete mode 100644 grpcmocks/proto-public/pbdataplane/mock_isGetEnvoyBootstrapParamsRequest_NodeSpec.go delete mode 100644 grpcmocks/proto-public/pbdns/mock_DNSServiceClient.go delete mode 100644 grpcmocks/proto-public/pbdns/mock_DNSServiceServer.go delete mode 100644 grpcmocks/proto-public/pbdns/mock_UnsafeDNSServiceServer.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_ResourceServiceClient.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_ResourceServiceServer.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_ResourceService_WatchListClient.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_ResourceService_WatchListServer.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_UnsafeResourceServiceServer.go delete mode 100644 grpcmocks/proto-public/pbresource/mock_isWatchEvent_Event.go delete mode 100644 grpcmocks/proto-public/pbserverdiscovery/mock_ServerDiscoveryServiceClient.go delete mode 100644 grpcmocks/proto-public/pbserverdiscovery/mock_ServerDiscoveryServiceServer.go delete mode 100644 grpcmocks/proto-public/pbserverdiscovery/mock_ServerDiscoveryService_WatchServersClient.go delete mode 100644 grpcmocks/proto-public/pbserverdiscovery/mock_ServerDiscoveryService_WatchServersServer.go delete mode 100644 grpcmocks/proto-public/pbserverdiscovery/mock_UnsafeServerDiscoveryServiceServer.go delete mode 100644 internal/auth/exports.go delete mode 100644 internal/auth/internal/controllers/register.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/builder.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/controller.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/controller_test.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/expander_ce.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/expander_ce/expander_ce.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/interface.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/helpers_ce.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/index.go delete mode 100644 internal/auth/internal/controllers/trafficpermissions/status.go delete mode 100644 internal/auth/internal/mappers/trafficpermissionsmapper/traffic_permissions_mapper.go delete mode 100644 internal/auth/internal/types/computed_traffic_permissions.go delete mode 100644 internal/auth/internal/types/computed_traffic_permissions_test.go delete mode 100644 internal/auth/internal/types/errors.go delete mode 100644 internal/auth/internal/types/namespace_traffic_permissions.go delete mode 100644 internal/auth/internal/types/namespace_traffic_permissions_test.go delete mode 100644 internal/auth/internal/types/partition_traffic_permissions.go delete mode 100644 internal/auth/internal/types/partition_traffic_permissions_test.go delete mode 100644 internal/auth/internal/types/traffic_permissions.go delete mode 100644 internal/auth/internal/types/traffic_permissions_test.go delete mode 100644 
internal/auth/internal/types/types.go delete mode 100644 internal/auth/internal/types/validate.go delete mode 100644 internal/auth/internal/types/validate_ce.go delete mode 100644 internal/auth/internal/types/workload_identity.go delete mode 100644 internal/auth/internal/types/workload_identity_test.go delete mode 100644 internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-service.json (78%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-1-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-1.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-10-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-10.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-11-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-11.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-12-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-12.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-13-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-13.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-14-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-14.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-15-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-15.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-16-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-16.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-17-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-17.json (82%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-18-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-18.json (82%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-19-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-19.json (82%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-2-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-2.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-20-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-20.json (82%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-3-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => 
v1alpha1}/api-workload-3.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-4-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-4.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-5-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-5.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-6-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-6.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-7-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-7.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-8-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-8.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-9-health.json (63%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/api-workload-9.json (83%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/foo-service-endpoints.json (70%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/foo-service.json (57%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/grpc-api-service.json (81%) rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/http-api-service.json (71%) create mode 100644 internal/catalog/catalogtest/integration_test_data/v1alpha1/node-1-health.json rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/node-1.json (63%) create mode 100644 internal/catalog/catalogtest/integration_test_data/v1alpha1/node-2-health.json rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/node-2.json (63%) create mode 100644 internal/catalog/catalogtest/integration_test_data/v1alpha1/node-3-health.json rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/node-3.json (63%) create mode 100644 internal/catalog/catalogtest/integration_test_data/v1alpha1/node-4-health.json rename internal/catalog/catalogtest/integration_test_data/{v2beta1 => v1alpha1}/node-4.json (63%) delete mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-1-health.json delete mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-2-health.json delete mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-3-health.json delete mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-4-health.json rename internal/catalog/catalogtest/{test_integration_v2beta1.go => test_integration_v1alpha1.go} (63%) delete mode 100644 internal/catalog/catalogtest/test_lifecycle_v2beta1.go delete mode 100644 internal/catalog/internal/controllers/endpoints/bound.go delete mode 100644 internal/catalog/internal/controllers/endpoints/bound_test.go create mode 100644 internal/catalog/internal/controllers/endpoints/reconciliation_data.go create mode 100644 internal/catalog/internal/controllers/endpoints/reconciliation_data_test.go delete mode 100644 internal/catalog/internal/controllers/failover/controller.go 
delete mode 100644 internal/catalog/internal/controllers/failover/controller_test.go delete mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce.go delete mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce/expander.go delete mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce/expander_test.go delete mode 100644 internal/catalog/internal/controllers/failover/expander/interface.go delete mode 100644 internal/catalog/internal/controllers/failover/helpers_ce.go delete mode 100644 internal/catalog/internal/controllers/failover/status.go create mode 100644 internal/catalog/internal/mappers/nodemapper/node_mapper.go create mode 100644 internal/catalog/internal/mappers/nodemapper/node_mapper_test.go rename internal/{resource => catalog/internal}/mappers/selectiontracker/selection_tracker.go (55%) create mode 100644 internal/catalog/internal/mappers/selectiontracker/selection_tracker_test.go delete mode 100644 internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go delete mode 100644 internal/catalog/internal/types/computed_failover_policy.go delete mode 100644 internal/catalog/internal/types/computed_failover_policy_test.go create mode 100644 internal/catalog/internal/types/dns_policy.go create mode 100644 internal/catalog/internal/types/dns_policy_test.go delete mode 100644 internal/catalog/internal/types/failover_policy.go delete mode 100644 internal/catalog/internal/types/failover_policy_test.go delete mode 100644 internal/catalog/internal/types/node_health_status.go delete mode 100644 internal/catalog/internal/types/node_health_status_test.go delete mode 100644 internal/catalog/workloadselector/acls.go delete mode 100644 internal/catalog/workloadselector/acls_test.go delete mode 100644 internal/catalog/workloadselector/gather.go delete mode 100644 internal/catalog/workloadselector/gather_test.go delete mode 100644 internal/catalog/workloadselector/index.go delete mode 100644 internal/catalog/workloadselector/index_test.go delete mode 100644 internal/catalog/workloadselector/integ_test.go delete mode 100644 internal/catalog/workloadselector/mapper.go delete mode 100644 internal/catalog/workloadselector/mapper_test.go delete mode 100644 internal/catalog/workloadselector/selecting.go delete mode 100644 internal/controller/.mockery.yaml create mode 100644 internal/controller/api.go create mode 100644 internal/controller/api_test.go delete mode 100644 internal/controller/cache/.mockery.yaml delete mode 100644 internal/controller/cache/cache.go delete mode 100644 internal/controller/cache/cache_test.go delete mode 100644 internal/controller/cache/cachemock/mock_Cache.go delete mode 100644 internal/controller/cache/cachemock/mock_Query.go delete mode 100644 internal/controller/cache/cachemock/mock_ReadOnlyCache.go delete mode 100644 internal/controller/cache/cachemock/mock_ResourceIterator.go delete mode 100644 internal/controller/cache/cachemock/mock_WriteCache.go delete mode 100644 internal/controller/cache/client.go delete mode 100644 internal/controller/cache/client_test.go delete mode 100644 internal/controller/cache/clone.go delete mode 100644 internal/controller/cache/clone_test.go delete mode 100644 internal/controller/cache/decoded.go delete mode 100644 internal/controller/cache/decoded_test.go delete mode 100644 internal/controller/cache/errors.go delete mode 100644 internal/controller/cache/errors_test.go delete mode 100644 internal/controller/cache/index/.mockery.yaml delete mode 100644 
internal/controller/cache/index/builder.go delete mode 100644 internal/controller/cache/index/builder_test.go delete mode 100644 internal/controller/cache/index/convenience.go delete mode 100644 internal/controller/cache/index/convenience_test.go delete mode 100644 internal/controller/cache/index/errors.go delete mode 100644 internal/controller/cache/index/errors_test.go delete mode 100644 internal/controller/cache/index/index.go delete mode 100644 internal/controller/cache/index/index_test.go delete mode 100644 internal/controller/cache/index/indexmock/mock_Indexer.go delete mode 100644 internal/controller/cache/index/indexmock/mock_MultiIndexer.go delete mode 100644 internal/controller/cache/index/indexmock/mock_ResourceIterator.go delete mode 100644 internal/controller/cache/index/indexmock/mock_SingleIndexer.go delete mode 100644 internal/controller/cache/index/indexmock/mock_resourceIterable.go delete mode 100644 internal/controller/cache/index/interfaces.go delete mode 100644 internal/controller/cache/index/iterator.go delete mode 100644 internal/controller/cache/index/iterator_test.go delete mode 100644 internal/controller/cache/index/testdata/MissingRequiredIndex.golden delete mode 100644 internal/controller/cache/index/txn.go delete mode 100644 internal/controller/cache/index/txn_test.go delete mode 100644 internal/controller/cache/indexers/.mockery.yaml delete mode 100644 internal/controller/cache/indexers/decoded_indexer.go delete mode 100644 internal/controller/cache/indexers/decoded_indexer_test.go delete mode 100644 internal/controller/cache/indexers/id_indexer.go delete mode 100644 internal/controller/cache/indexers/id_indexer_test.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_BoundReferences.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_FromArgs.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_GetSingleRefOrID.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_MultiIndexer.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_RefOrIDFetcher.go delete mode 100644 internal/controller/cache/indexers/indexersmock/mock_SingleIndexer.go delete mode 100644 internal/controller/cache/indexers/ref_indexer.go delete mode 100644 internal/controller/cache/indexers/ref_indexer_test.go delete mode 100644 internal/controller/cache/kind.go delete mode 100644 internal/controller/cache/kind_test.go delete mode 100644 internal/controller/cache/testdata/CacheTypeError.golden delete mode 100644 internal/controller/cache/testdata/DuplicateIndexError.golden delete mode 100644 internal/controller/cache/testdata/DuplicateQueryError.golden delete mode 100644 internal/controller/cache/testdata/IndexError.golden delete mode 100644 internal/controller/cache/testdata/IndexNotFound.golden delete mode 100644 internal/controller/cache/testdata/QueryNotFound.golden delete mode 100644 internal/controller/cache/testdata/QueryRequired.golden delete mode 100644 internal/controller/controller_test.go delete mode 100644 internal/controller/controllermock/mock_CacheIDModifier.go delete mode 100644 internal/controller/controllermock/mock_CustomDependencyMapper.go delete mode 100644 internal/controller/controllermock/mock_DependencyMapper.go delete mode 100644 internal/controller/controllermock/mock_DependencyTransform.go delete mode 100644 internal/controller/controllermock/mock_Lease.go delete mode 100644 internal/controller/controllermock/mock_Reconciler.go delete mode 100644 
internal/controller/controllermock/mock_task.go delete mode 100644 internal/controller/controllertest/builder.go delete mode 100644 internal/controller/custom_watch.go delete mode 100644 internal/controller/dependencies.go delete mode 100644 internal/controller/dependencies_test.go delete mode 100644 internal/controller/dependency/.mockery.yaml delete mode 100644 internal/controller/dependency/cache.go delete mode 100644 internal/controller/dependency/cache_test.go delete mode 100644 internal/controller/dependency/dependencymock/mock_CacheIDModifier.go delete mode 100644 internal/controller/dependency/dependencymock/mock_DependencyTransform.go delete mode 100644 internal/controller/dependency/higher_order.go delete mode 100644 internal/controller/dependency/higher_order_test.go delete mode 100644 internal/controller/dependency/simple.go delete mode 100644 internal/controller/dependency/transform.go delete mode 100644 internal/controller/dependency/transform_test.go create mode 100644 internal/controller/dependency_mappers.go rename internal/controller/{dependency/simple_test.go => dependency_mappers_test.go} (59%) delete mode 100644 internal/controller/helper.go delete mode 100644 internal/controller/helper_test.go delete mode 100644 internal/controller/mem_consistency_test.go delete mode 100644 internal/controller/runner.go delete mode 100644 internal/controller/testdata/dependencies.golden delete mode 100644 internal/controller/testing.go delete mode 100644 internal/controller/watch.go delete mode 100644 internal/dnsutil/dns.go delete mode 100644 internal/dnsutil/dns_test.go delete mode 100644 internal/hcp/exports.go delete mode 100644 internal/hcp/internal/controllers/link/controller.go delete mode 100644 internal/hcp/internal/controllers/link/controller_test.go delete mode 100644 internal/hcp/internal/controllers/link/status.go delete mode 100644 internal/hcp/internal/controllers/register.go delete mode 100644 internal/hcp/internal/controllers/telemetrystate/controller.go delete mode 100644 internal/hcp/internal/controllers/telemetrystate/controller_test.go delete mode 100644 internal/hcp/internal/controllers/telemetrystate/status.go delete mode 100644 internal/hcp/internal/types/link.go delete mode 100644 internal/hcp/internal/types/link_test.go delete mode 100644 internal/hcp/internal/types/telemetry_state.go delete mode 100644 internal/hcp/internal/types/testing.go delete mode 100644 internal/hcp/internal/types/types.go delete mode 100644 internal/mesh/internal/controllers/apigateways/controller.go delete mode 100644 internal/mesh/internal/controllers/apigateways/controller_test.go delete mode 100644 internal/mesh/internal/controllers/apigateways/fetcher/data_fetcher.go delete mode 100644 internal/mesh/internal/controllers/apigateways/fetcher/data_fetcher_test.go delete mode 100644 internal/mesh/internal/controllers/explicitdestinations/controller.go delete mode 100644 internal/mesh/internal/controllers/explicitdestinations/controller_test.go delete mode 100644 internal/mesh/internal/controllers/explicitdestinations/mapper/mapper.go delete mode 100644 internal/mesh/internal/controllers/explicitdestinations/status.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/api_gateway_builder.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/mesh_gateway_builder.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/mesh_gateway_builder_test.go delete mode 100644 
internal/mesh/internal/controllers/gatewayproxy/controller.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/controller_test.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/fetcher/data_fetcher.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/fetcher/data_fetcher_test.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/mapper/apigatewayworkloads.go delete mode 100644 internal/mesh/internal/controllers/gatewayproxy/mapper/meshgatewayworkloads.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/auth_helper_test.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/controller.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/controller_test.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/index.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/index_test.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/mapper.go delete mode 100644 internal/mesh/internal/controllers/implicitdestinations/status.go delete mode 100644 internal/mesh/internal/controllers/meshconfiguration/controller.go delete mode 100644 internal/mesh/internal/controllers/meshconfiguration/controller_test.go delete mode 100644 internal/mesh/internal/controllers/meshgateways/controller.go delete mode 100644 internal/mesh/internal/controllers/proxyconfiguration/controller.go delete mode 100644 internal/mesh/internal/controllers/proxyconfiguration/controller_test.go delete mode 100644 internal/mesh/internal/controllers/proxyconfiguration/sort.go delete mode 100644 internal/mesh/internal/controllers/proxyconfiguration/sort_test.go delete mode 100644 internal/mesh/internal/controllers/register.go delete mode 100644 internal/mesh/internal/controllers/routes/controller.go delete mode 100644 internal/mesh/internal/controllers/routes/controller_test.go delete mode 100644 internal/mesh/internal/controllers/routes/destination_policy_validation.go delete mode 100644 internal/mesh/internal/controllers/routes/destination_policy_validation_test.go delete mode 100644 internal/mesh/internal/controllers/routes/generate.go delete mode 100644 internal/mesh/internal/controllers/routes/generate_test.go delete mode 100644 internal/mesh/internal/controllers/routes/intermediate.go delete mode 100644 internal/mesh/internal/controllers/routes/loader/loader.go delete mode 100644 internal/mesh/internal/controllers/routes/loader/loader_test.go delete mode 100644 internal/mesh/internal/controllers/routes/loader/memoized.go delete mode 100644 internal/mesh/internal/controllers/routes/loader/related.go delete mode 100644 internal/mesh/internal/controllers/routes/pending_status.go delete mode 100644 internal/mesh/internal/controllers/routes/ref_validation.go delete mode 100644 internal/mesh/internal/controllers/routes/ref_validation_test.go delete mode 100644 internal/mesh/internal/controllers/routes/routestest/routestest.go delete mode 100644 internal/mesh/internal/controllers/routes/sort_rules.go delete mode 100644 internal/mesh/internal/controllers/routes/sort_rules_test.go delete mode 100644 internal/mesh/internal/controllers/routes/status.go delete mode 100644 internal/mesh/internal/controllers/routes/util.go delete mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/.mockery.yaml delete mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/util.go delete mode 100644 
internal/mesh/internal/controllers/routes/xroutemapper/xroutemapper.go delete mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/xroutemapper_test.go delete mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/xroutemappermock/mock_ResolveFailoverServiceDestinations.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/builder.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destination_multiport_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destinations_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app_multiport_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/naming.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/routes.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 
100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/controller.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/controller_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/data_fetcher.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/data_fetcher_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/helper_test.go delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/mapper.go delete mode 100644 
internal/mesh/internal/controllers/xds/controller.go delete mode 100644 internal/mesh/internal/controllers/xds/controller_test.go delete mode 100644 internal/mesh/internal/controllers/xds/endpoint_builder.go delete mode 100644 internal/mesh/internal/controllers/xds/endpoint_builder_test.go delete mode 100644 internal/mesh/internal/controllers/xds/leaf_cancels.go delete mode 100644 internal/mesh/internal/controllers/xds/leaf_mapper.go delete mode 100644 internal/mesh/internal/controllers/xds/mock_updater.go delete mode 100644 internal/mesh/internal/controllers/xds/proxy_tracker_watch.go delete mode 100644 internal/mesh/internal/controllers/xds/reconciliation_data.go delete mode 100644 internal/mesh/internal/controllers/xds/status/status.go delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden delete mode 100644 
internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths.golden delete mode 100644 
internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports.golden delete mode 100644 
internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-default-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-default-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-foo-bar.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-foo-default.golden delete mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports.golden delete mode 100644 internal/mesh/internal/mappers/common/workload_selector_util.go delete mode 100644 internal/mesh/internal/mappers/common/workload_selector_util_test.go delete mode 100644 internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go delete mode 100644 internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper_test.go delete mode 100644 internal/mesh/internal/meshindexes/computed_routes.go delete mode 100644 
internal/mesh/internal/meshindexes/computed_routes_test.go delete mode 100644 internal/mesh/internal/types/api_gateway.go delete mode 100644 internal/mesh/internal/types/computed_explicit_destinations.go delete mode 100644 internal/mesh/internal/types/computed_implicit_destinations.go delete mode 100644 internal/mesh/internal/types/computed_implicit_destinations_test.go delete mode 100644 internal/mesh/internal/types/computed_proxy_configuration.go delete mode 100644 internal/mesh/internal/types/computed_routes.go delete mode 100644 internal/mesh/internal/types/computed_routes_test.go delete mode 100644 internal/mesh/internal/types/decoded.go delete mode 100644 internal/mesh/internal/types/destination_policy.go delete mode 100644 internal/mesh/internal/types/destination_policy_test.go delete mode 100644 internal/mesh/internal/types/destinations.go delete mode 100644 internal/mesh/internal/types/destinations_configuration.go delete mode 100644 internal/mesh/internal/types/destinations_configuration_test.go delete mode 100644 internal/mesh/internal/types/destinations_test.go delete mode 100644 internal/mesh/internal/types/errors.go delete mode 100644 internal/mesh/internal/types/grpc_route.go delete mode 100644 internal/mesh/internal/types/grpc_route_test.go delete mode 100644 internal/mesh/internal/types/http_route.go delete mode 100644 internal/mesh/internal/types/http_route_test.go delete mode 100644 internal/mesh/internal/types/intermediate/types.go delete mode 100644 internal/mesh/internal/types/mesh_configuration.go delete mode 100644 internal/mesh/internal/types/mesh_gateway.go delete mode 100644 internal/mesh/internal/types/mesh_gateway_test.go delete mode 100644 internal/mesh/internal/types/proxy_configuration_test.go delete mode 100644 internal/mesh/internal/types/proxy_state_template.go delete mode 100644 internal/mesh/internal/types/proxy_state_template_test.go delete mode 100644 internal/mesh/internal/types/tcp_route.go delete mode 100644 internal/mesh/internal/types/tcp_route_test.go create mode 100644 internal/mesh/internal/types/upstreams.go delete mode 100644 internal/mesh/internal/types/util.go delete mode 100644 internal/mesh/internal/types/xroute.go delete mode 100644 internal/mesh/internal/types/xroute_test.go delete mode 100644 internal/mesh/proxy-snapshot/proxy_snapshot.go delete mode 100644 internal/mesh/proxy-tracker/mock_SessionLimiter.go delete mode 100644 internal/mesh/proxy-tracker/proxy_state_exports.go delete mode 100644 internal/mesh/proxy-tracker/proxy_state_exports_test.go delete mode 100644 internal/mesh/proxy-tracker/proxy_tracker.go delete mode 100644 internal/mesh/proxy-tracker/proxy_tracker_test.go delete mode 100644 internal/multicluster/exports.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/builder.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/controller.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/controller_test.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce/expander.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce/expander_test.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/types/types.go delete mode 100644 internal/multicluster/internal/controllers/exportedservices/helpers_ce.go delete 
mode 100644 internal/multicluster/internal/controllers/exportedservices/status.go delete mode 100644 internal/multicluster/internal/controllers/register.go delete mode 100644 internal/multicluster/internal/controllers/v1compat/controller.go delete mode 100644 internal/multicluster/internal/controllers/v1compat/controller_test.go delete mode 100644 internal/multicluster/internal/controllers/v1compat/mock_AggregatedConfig.go delete mode 100644 internal/multicluster/internal/types/computed_exported_services.go delete mode 100644 internal/multicluster/internal/types/computed_exported_services_test.go delete mode 100644 internal/multicluster/internal/types/decoded.go delete mode 100644 internal/multicluster/internal/types/exported_services.go delete mode 100644 internal/multicluster/internal/types/exported_services_test.go delete mode 100644 internal/multicluster/internal/types/helpers.go delete mode 100644 internal/multicluster/internal/types/helpers_ce.go delete mode 100644 internal/multicluster/internal/types/namespace_exported_services.go delete mode 100644 internal/multicluster/internal/types/namespace_exported_services_test.go delete mode 100644 internal/multicluster/internal/types/partition_exported_services.go delete mode 100644 internal/multicluster/internal/types/partition_exported_services_test.go delete mode 100644 internal/multicluster/internal/types/types.go delete mode 100644 internal/multicluster/internal/types/types_ce.go delete mode 100644 internal/protohcl/any.go delete mode 100644 internal/protohcl/attributes.go delete mode 100644 internal/protohcl/blocks.go delete mode 100644 internal/protohcl/cty.go delete mode 100644 internal/protohcl/decoder.go delete mode 100644 internal/protohcl/doc.go delete mode 100644 internal/protohcl/naming.go delete mode 100644 internal/protohcl/oneof.go delete mode 100644 internal/protohcl/primitives.go delete mode 100644 internal/protohcl/testproto/buf.gen.yaml delete mode 100644 internal/protohcl/testproto/example.pb.go delete mode 100644 internal/protohcl/testproto/example.proto delete mode 100644 internal/protohcl/unmarshal.go delete mode 100644 internal/protohcl/unmarshal_test.go delete mode 100644 internal/protohcl/well_known_types.go delete mode 100644 internal/protoutil/protoutil.go delete mode 100644 internal/resource/acls.go delete mode 100644 internal/resource/authz.go delete mode 100644 internal/resource/authz_ce.go delete mode 100644 internal/resource/authz_ce_test.go delete mode 100644 internal/resource/bound_refs.go delete mode 100644 internal/resource/decode.go delete mode 100644 internal/resource/decode_test.go delete mode 100644 internal/resource/filter.go delete mode 100644 internal/resource/filter_test.go delete mode 100644 internal/resource/hooks.go delete mode 100644 internal/resource/hooks_test.go delete mode 100644 internal/resource/http/http.go delete mode 100644 internal/resource/http/http_test.go delete mode 100644 internal/resource/mappers/bimapper/bimapper.go delete mode 100644 internal/resource/mappers/bimapper/bimapper_test.go delete mode 100644 internal/resource/mappers/selectiontracker/selection_tracker_test.go delete mode 100644 internal/resource/protoc-gen-deepcopy/internal/generate/generate.go delete mode 100644 internal/resource/protoc-gen-deepcopy/main.go delete mode 100644 internal/resource/protoc-gen-json-shim/internal/generate/generate.go delete mode 100644 internal/resource/protoc-gen-json-shim/main.go delete mode 100644 internal/resource/protoc-gen-resource-types/internal/generate/generate.go delete 
mode 100644 internal/resource/protoc-gen-resource-types/main.go delete mode 100644 internal/resource/refkey.go delete mode 100644 internal/resource/refkey_test.go delete mode 100644 internal/resource/registry_ce.go delete mode 100644 internal/resource/resource.go delete mode 100644 internal/resource/resource_test.go delete mode 100644 internal/resource/resourcetest/acls.go delete mode 100644 internal/resource/resourcetest/decode.go delete mode 100644 internal/resource/resourcetest/tenancy.go delete mode 100644 internal/resource/resourcetest/validation.go delete mode 100644 internal/resource/sort.go delete mode 100644 internal/resource/sort_test.go delete mode 100644 internal/resource/stringer.go delete mode 100644 internal/resource/tenancy.go delete mode 100644 internal/resource/tenancy_test.go delete mode 100644 internal/resourcehcl/any.go delete mode 100644 internal/resourcehcl/naming.go delete mode 100644 internal/resourcehcl/testdata/destinations.golden delete mode 100644 internal/resourcehcl/testdata/destinations.hcl delete mode 100644 internal/resourcehcl/testdata/fuzz/FuzzUnmarshall/0e4b8ec300611dbc delete mode 100644 internal/resourcehcl/testdata/fuzz/FuzzUnmarshall/c800420b7494c6d1 delete mode 100644 internal/resourcehcl/testdata/fuzz/FuzzUnmarshall/eaba8205942c3f31 delete mode 100644 internal/resourcehcl/testdata/gvk-no-arguments.error delete mode 100644 internal/resourcehcl/testdata/gvk-no-arguments.hcl delete mode 100644 internal/resourcehcl/testdata/invalid-group.error delete mode 100644 internal/resourcehcl/testdata/invalid-group.hcl delete mode 100644 internal/resourcehcl/testdata/invalid-gvk.error delete mode 100644 internal/resourcehcl/testdata/invalid-gvk.hcl delete mode 100644 internal/resourcehcl/testdata/invalid-metadata.error delete mode 100644 internal/resourcehcl/testdata/invalid-metadata.hcl delete mode 100644 internal/resourcehcl/testdata/invalid-name.error delete mode 100644 internal/resourcehcl/testdata/invalid-name.hcl delete mode 100644 internal/resourcehcl/testdata/no-blocks-any-first.golden delete mode 100644 internal/resourcehcl/testdata/no-blocks-any-first.hcl delete mode 100644 internal/resourcehcl/testdata/no-blocks.golden delete mode 100644 internal/resourcehcl/testdata/no-blocks.hcl delete mode 100644 internal/resourcehcl/testdata/owner.golden delete mode 100644 internal/resourcehcl/testdata/owner.hcl delete mode 100644 internal/resourcehcl/testdata/simple-gvk.golden delete mode 100644 internal/resourcehcl/testdata/simple-gvk.hcl delete mode 100644 internal/resourcehcl/testdata/type-block.golden delete mode 100644 internal/resourcehcl/testdata/type-block.hcl delete mode 100644 internal/resourcehcl/testdata/unknown-field-block.error delete mode 100644 internal/resourcehcl/testdata/unknown-field-block.hcl delete mode 100644 internal/resourcehcl/testdata/unknown-field-object.error delete mode 100644 internal/resourcehcl/testdata/unknown-field-object.hcl delete mode 100644 internal/resourcehcl/testdata/unknown-type.error delete mode 100644 internal/resourcehcl/testdata/unknown-type.hcl delete mode 100644 internal/resourcehcl/unmarshal.go delete mode 100644 internal/resourcehcl/unmarshal_test.go delete mode 100644 internal/tenancy/exports.go delete mode 100644 internal/tenancy/internal/bridge/tenancy_bridge.go delete mode 100644 internal/tenancy/internal/bridge/tenancy_bridge_ce.go delete mode 100644 internal/tenancy/internal/controllers/common/common.go delete mode 100644 internal/tenancy/internal/controllers/namespace/controller.go delete mode 100644 
internal/tenancy/internal/controllers/register.go delete mode 100644 internal/tenancy/internal/controllers/register_ce.go delete mode 100644 internal/tenancy/internal/types/errors.go delete mode 100644 internal/tenancy/internal/types/namespace.go delete mode 100644 internal/tenancy/internal/types/types.go delete mode 100644 internal/tenancy/internal/types/types_ce.go delete mode 100644 internal/tenancy/internal/types/types_test.go delete mode 100644 internal/tenancy/tenancytest/namespace_controller_test.go delete mode 100644 internal/tenancy/tenancytest/namespace_test.go delete mode 100644 internal/testing/errors/errors.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/.mockery.yaml delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/e2e_test.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/mock_SimpleClient_test.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/mock_Simple_FlowClient_test.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/buf.gen.yaml delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/cloning_stream.pb.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/service.pb.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/service.proto delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/service_cloning_grpc.pb.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/e2e/proto/service_grpc.pb.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/generate.go delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/templates/cloning-stream.tmpl delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/templates/file.tmpl delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/templates/server-stream-method.tmpl delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/templates/service.tmpl delete mode 100644 internal/tools/protoc-gen-grpc-clone/internal/generate/templates/unary-method.tmpl delete mode 100644 internal/tools/protoc-gen-grpc-clone/main.go delete mode 100644 lib/hoststats/collector_test.go delete mode 100644 lib/testhelpers/testhelpers.go delete mode 100644 proto-public/LICENSE delete mode 100644 proto-public/annotations/ratelimit/ratelimit_deepcopy.gen.go delete mode 100644 proto-public/annotations/ratelimit/ratelimit_json.gen.go delete mode 100644 proto-public/buf.lock delete mode 100644 proto-public/pbacl/acl_cloning_grpc.pb.go delete mode 100644 proto-public/pbacl/acl_deepcopy.gen.go delete mode 100644 proto-public/pbacl/acl_json.gen.go delete mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.pb.binary.go delete mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.pb.go delete mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.proto delete mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go delete mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions_json.gen.go delete mode 100644 proto-public/pbauth/v2beta1/resources.rtypes.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permission_extras_test.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.pb.binary.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.pb.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.proto delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_addon.go delete mode 100644 
proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_extras.go delete mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_json.gen.go delete mode 100644 proto-public/pbauth/v2beta1/workload_identity.pb.binary.go delete mode 100644 proto-public/pbauth/v2beta1/workload_identity.pb.go delete mode 100644 proto-public/pbauth/v2beta1/workload_identity.proto delete mode 100644 proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go delete mode 100644 proto-public/pbauth/v2beta1/workload_identity_json.gen.go rename proto-public/{pbmesh/v2beta1/pbproxystate/endpoints.pb.binary.go => pbcatalog/v1alpha1/dns.pb.binary.go} (60%) create mode 100644 proto-public/pbcatalog/v1alpha1/dns.pb.go create mode 100644 proto-public/pbcatalog/v1alpha1/dns.proto rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/health.pb.binary.go (88%) create mode 100644 proto-public/pbcatalog/v1alpha1/health.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/health.proto (57%) rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/node.pb.binary.go (91%) create mode 100644 proto-public/pbcatalog/v1alpha1/node.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/node.proto (70%) create mode 100644 proto-public/pbcatalog/v1alpha1/protocol.pb.go create mode 100644 proto-public/pbcatalog/v1alpha1/protocol.proto rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/selector.pb.binary.go (85%) create mode 100644 proto-public/pbcatalog/v1alpha1/selector.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/selector.proto (80%) rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/service.pb.binary.go (90%) create mode 100644 proto-public/pbcatalog/v1alpha1/service.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/service.proto (63%) rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/service_endpoints.pb.binary.go (89%) create mode 100644 proto-public/pbcatalog/v1alpha1/service_endpoints.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/service_endpoints.proto (65%) rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/vip.pb.binary.go (91%) create mode 100644 proto-public/pbcatalog/v1alpha1/vip.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/vip.proto (72%) rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/workload.pb.binary.go (66%) create mode 100644 proto-public/pbcatalog/v1alpha1/workload.pb.go rename proto-public/pbcatalog/{v2beta1 => v1alpha1}/workload.proto (83%) delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.pb.binary.go delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.proto delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_extras.go delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_extras_test.go delete mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.pb.binary.go delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.proto delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_extras.go delete mode 100644 
proto-public/pbcatalog/v2beta1/failover_policy_extras_test.go delete mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/health.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/health_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/node.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/node_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/protocol.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/protocol.proto delete mode 100644 proto-public/pbcatalog/v2beta1/resources.rtypes.go delete mode 100644 proto-public/pbcatalog/v2beta1/selector.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/selector_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/service.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_addon.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_addon_test.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_addon.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_addon_test.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/service_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/vip.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/vip_json.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/workload.pb.go delete mode 100644 proto-public/pbcatalog/v2beta1/workload_addon.go delete mode 100644 proto-public/pbcatalog/v2beta1/workload_addon_test.go delete mode 100644 proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go delete mode 100644 proto-public/pbcatalog/v2beta1/workload_json.gen.go delete mode 100644 proto-public/pbconnectca/ca_cloning_grpc.pb.go delete mode 100644 proto-public/pbconnectca/ca_deepcopy.gen.go delete mode 100644 proto-public/pbconnectca/ca_json.gen.go delete mode 100644 proto-public/pbconnectca/cloning_stream.pb.go delete mode 100644 proto-public/pbdataplane/dataplane_cloning_grpc.pb.go delete mode 100644 proto-public/pbdataplane/dataplane_deepcopy.gen.go delete mode 100644 proto-public/pbdataplane/dataplane_json.gen.go delete mode 100644 proto-public/pbdns/dns_cloning_grpc.pb.go delete mode 100644 proto-public/pbdns/dns_deepcopy.gen.go delete mode 100644 proto-public/pbdns/dns_json.gen.go create mode 100644 proto-public/pbdns/mock_DNSServiceClient.go create mode 100644 proto-public/pbdns/mock_DNSServiceServer.go create mode 100644 proto-public/pbdns/mock_UnsafeDNSServiceServer.go delete mode 100644 proto-public/pbhcp/v2/hcp_config.pb.binary.go delete mode 100644 proto-public/pbhcp/v2/hcp_config.pb.go delete mode 100644 proto-public/pbhcp/v2/hcp_config.proto delete mode 100644 proto-public/pbhcp/v2/hcp_config_deepcopy.gen.go delete mode 100644 proto-public/pbhcp/v2/hcp_config_json.gen.go delete mode 100644 proto-public/pbhcp/v2/link.pb.binary.go delete mode 100644 proto-public/pbhcp/v2/link.pb.go delete mode 100644 proto-public/pbhcp/v2/link.proto 
delete mode 100644 proto-public/pbhcp/v2/link_deepcopy.gen.go delete mode 100644 proto-public/pbhcp/v2/link_json.gen.go delete mode 100644 proto-public/pbhcp/v2/resources.rtypes.go delete mode 100644 proto-public/pbhcp/v2/telemetry_state.pb.binary.go delete mode 100644 proto-public/pbhcp/v2/telemetry_state.pb.go delete mode 100644 proto-public/pbhcp/v2/telemetry_state.proto delete mode 100644 proto-public/pbhcp/v2/telemetry_state_deepcopy.gen.go delete mode 100644 proto-public/pbhcp/v2/telemetry_state_json.gen.go rename proto-public/pbmesh/{v2beta1 => v1alpha1}/connection.pb.binary.go (91%) create mode 100644 proto-public/pbmesh/v1alpha1/connection.pb.go create mode 100644 proto-public/pbmesh/v1alpha1/connection.proto rename proto-public/pbmesh/{v2beta1 => v1alpha1}/expose.pb.binary.go (91%) create mode 100644 proto-public/pbmesh/v1alpha1/expose.pb.go create mode 100644 proto-public/pbmesh/v1alpha1/expose.proto rename proto-public/pbmesh/{v2beta1/proxy_configuration.pb.binary.go => v1alpha1/proxy.pb.binary.go} (66%) create mode 100644 proto-public/pbmesh/v1alpha1/proxy.pb.go create mode 100644 proto-public/pbmesh/v1alpha1/proxy.proto create mode 100644 proto-public/pbmesh/v1alpha1/routing.pb.go rename proto-public/pbmesh/{v2beta1 => v1alpha1}/routing.proto (84%) rename proto-public/pbmesh/{v2beta1/pbproxystate/listener.pb.binary.go => v1alpha1/upstreams.pb.binary.go} (59%) create mode 100644 proto-public/pbmesh/v1alpha1/upstreams.pb.go create mode 100644 proto-public/pbmesh/v1alpha1/upstreams.proto delete mode 100644 proto-public/pbmesh/v2beta1/api_gateway.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/api_gateway.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/api_gateway.proto delete mode 100644 proto-public/pbmesh/v2beta1/api_gateway_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/api_gateway_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/common.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/common.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/common.proto delete mode 100644 proto-public/pbmesh/v2beta1/common_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/common_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations.proto delete mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.proto delete mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.proto delete mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations_json.gen.go delete mode 100644 
proto-public/pbmesh/v2beta1/computed_proxy_configuration.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration.proto delete mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_routes.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_routes.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_routes.proto delete mode 100644 proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/computed_routes_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/connection.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/connection.proto delete mode 100644 proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/connection_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destination_policy.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/destination_policy.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/destination_policy.proto delete mode 100644 proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destination_policy_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations.proto delete mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.proto delete mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/destinations_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/expose.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/expose.proto delete mode 100644 proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/expose_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/grpc_route.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/grpc_route.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/grpc_route.proto delete mode 100644 proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/grpc_route_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route.proto delete mode 100644 proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.proto delete mode 100644 proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_retries_json.gen.go delete mode 
100644 proto-public/pbmesh/v2beta1/http_route_timeouts.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts.proto delete mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.proto delete mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway.proto delete mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_json.gen.go delete mode 100644 
proto-public/pbmesh/v2beta1/pbproxystate/intentions.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol_test.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.proto delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration.proto delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_addon.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_addon_test.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_state.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_state.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_state.proto delete mode 100644 proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/proxy_state_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/resources.rtypes.go delete mode 100644 proto-public/pbmesh/v2beta1/routing.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/tcp_route.pb.binary.go delete mode 100644 proto-public/pbmesh/v2beta1/tcp_route.pb.go delete mode 100644 proto-public/pbmesh/v2beta1/tcp_route.proto delete mode 100644 proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go delete mode 100644 
proto-public/pbmesh/v2beta1/tcp_route_json.gen.go delete mode 100644 proto-public/pbmesh/v2beta1/xroute_addons.go delete mode 100644 proto-public/pbmesh/v2beta1/xroute_addons_test.go delete mode 100644 proto-public/pbmulticluster/v2/computed_exported_services.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2/computed_exported_services.pb.go delete mode 100644 proto-public/pbmulticluster/v2/computed_exported_services.proto delete mode 100644 proto-public/pbmulticluster/v2/computed_exported_services_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2/computed_exported_services_json.gen.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services.pb.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services.proto delete mode 100644 proto-public/pbmulticluster/v2/exported_services_consumer.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services_consumer.pb.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services_consumer.proto delete mode 100644 proto-public/pbmulticluster/v2/exported_services_consumer_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services_consumer_json.gen.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2/exported_services_json.gen.go delete mode 100644 proto-public/pbmulticluster/v2/namespace_exported_services.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2/namespace_exported_services.pb.go delete mode 100644 proto-public/pbmulticluster/v2/namespace_exported_services.proto delete mode 100644 proto-public/pbmulticluster/v2/namespace_exported_services_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2/namespace_exported_services_json.gen.go delete mode 100644 proto-public/pbmulticluster/v2/partition_exported_services.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2/partition_exported_services.pb.go delete mode 100644 proto-public/pbmulticluster/v2/partition_exported_services.proto delete mode 100644 proto-public/pbmulticluster/v2/partition_exported_services_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2/partition_exported_services_json.gen.go delete mode 100644 proto-public/pbmulticluster/v2/resources.rtypes.go delete mode 100644 proto-public/pbmulticluster/v2beta1/resources.rtypes.go delete mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.pb.binary.go delete mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.pb.go delete mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.proto delete mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group_deepcopy.gen.go delete mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group_json.gen.go delete mode 100644 proto-public/pbresource/annotations.pb.binary.go delete mode 100644 proto-public/pbresource/annotations.pb.go delete mode 100644 proto-public/pbresource/annotations.proto delete mode 100644 proto-public/pbresource/annotations_deepcopy.gen.go delete mode 100644 proto-public/pbresource/annotations_json.gen.go delete mode 100644 proto-public/pbresource/cloning_stream.pb.go delete mode 100644 proto-public/pbresource/resource_cloning_grpc.pb.go delete mode 100644 proto-public/pbresource/resource_deepcopy.gen.go delete mode 100644 proto-public/pbresource/resource_json.gen.go delete mode 100644 proto-public/pbserverdiscovery/cloning_stream.pb.go 
delete mode 100644 proto-public/pbserverdiscovery/serverdiscovery_cloning_grpc.pb.go delete mode 100644 proto-public/pbserverdiscovery/serverdiscovery_deepcopy.gen.go delete mode 100644 proto-public/pbserverdiscovery/serverdiscovery_json.gen.go delete mode 100644 proto-public/pbtenancy/v2beta1/namespace.pb.binary.go delete mode 100644 proto-public/pbtenancy/v2beta1/namespace.pb.go delete mode 100644 proto-public/pbtenancy/v2beta1/namespace.proto delete mode 100644 proto-public/pbtenancy/v2beta1/namespace_deepcopy.gen.go delete mode 100644 proto-public/pbtenancy/v2beta1/namespace_json.gen.go delete mode 100644 proto-public/pbtenancy/v2beta1/partition.pb.binary.go delete mode 100644 proto-public/pbtenancy/v2beta1/partition.pb.go delete mode 100644 proto-public/pbtenancy/v2beta1/partition.proto delete mode 100644 proto-public/pbtenancy/v2beta1/partition_deepcopy.gen.go delete mode 100644 proto-public/pbtenancy/v2beta1/partition_json.gen.go delete mode 100644 proto-public/pbtenancy/v2beta1/resources.rtypes.go delete mode 100644 proto/private/pbconfigentry/config_entry_ce.go delete mode 100644 proto/private/pbconfigentry/config_entry_grpc.pb.go delete mode 100644 proto/private/pbdemo/v1/resources.rtypes.go delete mode 100644 proto/private/pbdemo/v2/resources.rtypes.go delete mode 100644 proto/private/prototest/golden_json.go delete mode 100644 sdk/LICENSE delete mode 100644 sdk/testutil/retry/counter.go delete mode 100644 sdk/testutil/retry/doc.go delete mode 100644 sdk/testutil/retry/interface.go delete mode 100644 sdk/testutil/retry/output.go delete mode 100644 sdk/testutil/retry/retryer.go delete mode 100644 sdk/testutil/retry/run.go delete mode 100644 sdk/testutil/retry/timer.go delete mode 100644 test-integ/Makefile delete mode 100644 test-integ/README.md delete mode 100644 test-integ/catalogv2/explicit_destinations_l7_test.go delete mode 100644 test-integ/catalogv2/explicit_destinations_test.go delete mode 100644 test-integ/catalogv2/helpers_test.go delete mode 100644 test-integ/catalogv2/implicit_destinations_test.go delete mode 100644 test-integ/catalogv2/traffic_permissions_test.go delete mode 100644 test-integ/connect/snapshot_test.go delete mode 100644 test-integ/go.mod delete mode 100644 test-integ/go.sum delete mode 100644 test-integ/peering_commontopo/README.md delete mode 100644 test-integ/peering_commontopo/ac1_basic_test.go delete mode 100644 test-integ/peering_commontopo/ac2_disco_chain_test.go delete mode 100644 test-integ/peering_commontopo/ac3_service_defaults_upstream_test.go delete mode 100644 test-integ/peering_commontopo/ac4_proxy_defaults_test.go delete mode 100644 test-integ/peering_commontopo/ac5_1_no_svc_mesh_test.go delete mode 100644 test-integ/peering_commontopo/ac5_2_pq_failover_test.go delete mode 100644 test-integ/peering_commontopo/ac6_failovers_test.go delete mode 100644 test-integ/peering_commontopo/ac7_1_rotate_gw_test.go delete mode 100644 test-integ/peering_commontopo/ac7_2_rotate_leader_test.go delete mode 100644 test-integ/peering_commontopo/commontopo.go delete mode 100644 test-integ/peering_commontopo/sharedtopology_test.go delete mode 100644 test-integ/tenancy/client.go delete mode 100644 test-integ/tenancy/common.go delete mode 100644 test-integ/tenancy/namespace_ce_test.go delete mode 100644 test-integ/topoutil/asserter.go delete mode 100644 test-integ/topoutil/asserter_blankspace.go delete mode 100644 test-integ/topoutil/blankspace.go delete mode 100644 test-integ/topoutil/fixtures.go delete mode 100644 test-integ/topoutil/http2.go delete mode 
100644 test-integ/topoutil/http_consul.go
delete mode 100644 test-integ/topoutil/naming_shim.go
delete mode 100644 test-integ/upgrade/basic/common.go
delete mode 100644 test-integ/upgrade/basic/upgrade_basic_test.go
delete mode 100644 test-integ/upgrade/l7_traffic_management/common.go
delete mode 100644 test-integ/upgrade/l7_traffic_management/resolver_test.go
delete mode 100644 test/integration/connect/envoy/Dockerfile-consul-envoy-windows
delete mode 100644 test/integration/connect/envoy/Dockerfile-tcpdump-windows
delete mode 100644 test/integration/connect/envoy/Dockerfile-test-sds-server-windows
delete mode 100644 test/integration/connect/envoy/WINDOWS-TEST.md
delete mode 100644 test/integration/connect/envoy/case-wanfed-gw/global-setup-windows.sh
delete mode 100644 test/integration/connect/envoy/docker-windows.md
delete mode 100644 test/integration/connect/envoy/docs/img/linux-arch.png
delete mode 100644 test/integration/connect/envoy/docs/img/windows-arch-singlecontainer.png
delete mode 100644 test/integration/connect/envoy/docs/img/windows-linux-arch.png
delete mode 100644 test/integration/connect/envoy/docs/windows-testing-architecture.md
delete mode 100644 test/integration/connect/envoy/helpers.windows.bash
delete mode 100644 test/integration/connect/envoy/run-tests.windows.sh
delete mode 100644 test/integration/connect/envoy/windows-troubleshooting.md
delete mode 100644 test/integration/consul-container/assets/Dockerfile-consul-dataplane
delete mode 100644 test/integration/consul-container/libs/cluster/dataplane.go
delete mode 100644 test/integration/consul-container/test/catalog/catalog_test.go
delete mode 100644 test/integration/consul-container/test/debugging.md
delete mode 100644 test/integration/consul-container/test/envoy_extensions/otel_access_logging_test.go
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/otel/config.yaml
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/Dockerfile
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/README.md
delete mode 100755 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/build.sh
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/go.mod
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/go.sum
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/nginx.conf
delete mode 100644 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/wasm_add_header.go
delete mode 100755 test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/wasm_add_header.wasm
delete mode 100644 test/integration/consul-container/test/envoy_extensions/wasm_test.go
delete mode 100644 test/integration/consul-container/test/gateways/terminating_gateway_test.go
delete mode 100644 test/integration/consul-container/test/resource/grpc_forwarding_test.go
delete mode 100644 test/integration/consul-container/test/resource/http_api/acl_enabled_test.go
delete mode 100644 test/integration/consul-container/test/resource/http_api/client/client.go
delete mode 100644 test/integration/consul-container/test/resource/http_api/helper.go
delete mode 100644 test/integration/consul-container/test/trafficpermissions/tcp_test.go
create mode 100644 test/integration/consul-container/test/upgrade/basic/fullstopupgrade_test.go
delete mode 100644 test/integration/consul-container/test/upgrade/catalog/catalog_test.go
delete mode 100644 test/integration/consul-container/test/util/test_debug_breakpoint_hit.png
delete mode 100644 test/integration/consul-container/test/util/test_debug_configuration.png
delete mode 100644 test/integration/consul-container/test/util/test_debug_info.png
delete mode 100644 test/integration/consul-container/test/util/test_debug_remote_configuration.png
delete mode 100644 test/integration/consul-container/test/util/test_debug_remote_connected.png
delete mode 100644 test/integration/consul-container/test/util/test_debug_resume_program.png
delete mode 100644 testing/deployer/.gitignore
delete mode 100644 testing/deployer/README.md
delete mode 100644 testing/deployer/TODO.md
delete mode 100644 testing/deployer/go.mod
delete mode 100644 testing/deployer/go.sum
delete mode 100644 testing/deployer/sprawl/acl.go
delete mode 100644 testing/deployer/sprawl/acl_rules.go
delete mode 100644 testing/deployer/sprawl/boot.go
delete mode 100644 testing/deployer/sprawl/catalog.go
delete mode 100644 testing/deployer/sprawl/configentries.go
delete mode 100644 testing/deployer/sprawl/consul.go
delete mode 100644 testing/deployer/sprawl/debug.go
delete mode 100644 testing/deployer/sprawl/details.go
delete mode 100644 testing/deployer/sprawl/ent.go
delete mode 100644 testing/deployer/sprawl/grpc.go
delete mode 100644 testing/deployer/sprawl/helpers.go
delete mode 100644 testing/deployer/sprawl/internal/build/docker.go
delete mode 100644 testing/deployer/sprawl/internal/runner/exec.go
delete mode 100644 testing/deployer/sprawl/internal/secrets/store.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/agent.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/digest.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/dns.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/docker.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/docker_test.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/gen.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/io.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/nodes.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/prelude.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/proxy.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/res.go
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app-dataplane.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app-sidecar.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-app.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-consul.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-coredns.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-mgw-dataplane.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-mgw.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-pause.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/templates/container-proxy.tf.tmpl
delete mode 100644 testing/deployer/sprawl/internal/tfgen/tfgen.go
delete mode 100644 testing/deployer/sprawl/network_area_ce.go
delete mode 100644 testing/deployer/sprawl/peering.go
delete mode 100644 testing/deployer/sprawl/resources.go
delete mode 100644 testing/deployer/sprawl/sprawl.go
delete mode 100644 testing/deployer/sprawl/sprawltest/sprawltest.go
delete mode 100644 testing/deployer/sprawl/sprawltest/test_test.go
delete mode 100644 testing/deployer/sprawl/tls.go
delete mode 100644 testing/deployer/topology/compile.go
delete mode 100644 testing/deployer/topology/default_versions.go
delete mode 100644 testing/deployer/topology/ids.go
delete mode 100644 testing/deployer/topology/images.go
delete mode 100644 testing/deployer/topology/images_test.go
delete mode 100644 testing/deployer/topology/naming_shim.go
delete mode 100644 testing/deployer/topology/relationships.go
delete mode 100644 testing/deployer/topology/topology.go
delete mode 100644 testing/deployer/topology/util.go
delete mode 100644 testing/deployer/topology/util_test.go
delete mode 100755 testing/deployer/update-latest-versions.sh
delete mode 100644 testing/deployer/util/consul.go
delete mode 100644 testing/deployer/util/files.go
delete mode 100644 testing/deployer/util/internal/ipamutils/doc.go
delete mode 100644 testing/deployer/util/internal/ipamutils/utils.go
delete mode 100644 testing/deployer/util/internal/ipamutils/utils_test.go
delete mode 100644 testing/deployer/util/net.go
delete mode 100644 testing/deployer/util/v2.go
delete mode 100644 testing/deployer/util/v2_decode.go
create mode 100644 ui/packages/consul-hcp/app/components/consul/hcp/home/index.hbs
create mode 100644 ui/packages/consul-hcp/app/components/consul/hcp/home/index.scss
create mode 100644 ui/packages/consul-hcp/app/components/consul/hcp/home/index.test.js
create mode 100644 ui/packages/consul-hcp/package.json
create mode 100644 ui/packages/consul-hcp/vendor/consul-hcp/routes.js
create mode 100644 ui/packages/consul-hcp/vendor/consul-hcp/services.js
delete mode 100644 ui/packages/consul-ui/app/abilities/operator.js
create mode 100644 ui/packages/consul-ui/app/components/auth-profile/README.mdx
create mode 100644 ui/packages/consul-ui/app/components/auth-profile/index.hbs
create mode 100644 ui/packages/consul-ui/app/components/auth-profile/index.scss
delete mode 100644 ui/packages/consul-ui/app/components/consul/datacenter/selector/index.js
delete mode 100644 ui/packages/consul-ui/app/components/consul/service/list/item/index.hbs
delete mode 100644 ui/packages/consul-ui/app/components/consul/service/list/item/index.js
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/README.mdx (93%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/chart.xstate.js (92%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/index.hbs (72%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/index.js (67%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/index.scss (93%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/layout.scss (94%)
rename ui/packages/consul-ui/app/components/{consul-copy-button => copy-button}/skin.scss (94%)
delete mode 100644 ui/packages/consul-ui/app/components/hcp-nav-item/index.hbs
delete mode 100644 ui/packages/consul-ui/app/components/hcp-nav-item/index.js
delete mode 100644 ui/packages/consul-ui/app/components/link-to-hcp-banner/index.hbs
delete mode 100644 ui/packages/consul-ui/app/components/link-to-hcp-banner/index.js
delete mode 100644 ui/packages/consul-ui/app/components/link-to-hcp-modal/index.hbs
delete mode 100644 ui/packages/consul-ui/app/components/link-to-hcp-modal/index.js
delete mode 100644 ui/packages/consul-ui/app/components/link-to-hcp-modal/index.scss
create mode 100644 ui/packages/consul-ui/app/components/main-header-horizontal/index.scss
create mode 100644 ui/packages/consul-ui/app/components/main-header-horizontal/layout.scss
create mode 100644 ui/packages/consul-ui/app/components/main-header-horizontal/skin.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-horizontal/index.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-horizontal/layout.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-horizontal/skin.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-vertical/README.mdx
create mode 100644 ui/packages/consul-ui/app/components/main-nav-vertical/debug.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-vertical/index.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-vertical/layout.scss
create mode 100644 ui/packages/consul-ui/app/components/main-nav-vertical/skin.scss
delete mode 100644 ui/packages/consul-ui/app/components/nav-selector/generic.hbs
delete mode 100644 ui/packages/consul-ui/app/components/nav-selector/index.hbs
delete mode 100644 ui/packages/consul-ui/app/components/nav-selector/index.js
create mode 100644 ui/packages/consul-ui/app/components/skip-links/index.scss
create mode 100644 ui/packages/consul-ui/app/components/skip-links/layout.scss
create mode 100644 ui/packages/consul-ui/app/components/skip-links/skin.scss
delete mode 100644 ui/packages/consul-ui/app/helpers/hcp-authentication-link.js
delete mode 100644 ui/packages/consul-ui/app/helpers/hcp-resource-id-to-link.js
delete mode 100644 ui/packages/consul-ui/app/routes/unavailable.js
delete mode 100644 ui/packages/consul-ui/app/services/hcp-link-modal.js
delete mode 100644 ui/packages/consul-ui/app/services/hcp-link-status.js
delete mode 100644 ui/packages/consul-ui/app/services/repository/hcp-link.js
create mode 100644 ui/packages/consul-ui/app/styles/base/color/ui/index.scss
create mode 100644 ui/packages/consul-ui/app/styles/themes.scss
delete mode 100644 ui/packages/consul-ui/app/templates/unavailable.hbs
delete mode 100644 ui/packages/consul-ui/mock-api/api/hcp/v2/link/global
delete mode 100644 ui/packages/consul-ui/mock-api/prefixed-api/api/hcp/v2/link/global
delete mode 100644 ui/packages/consul-ui/tests/acceptance/link-to-hcp-test.js
delete mode 100644 ui/packages/consul-ui/tests/acceptance/unavailable-test.js
create mode 100644 ui/packages/consul-ui/tests/integration/components/auth-profile-test.js
create mode 100644 ui/packages/consul-ui/tests/integration/components/consul/hcp/home-test.js
delete mode 100644 ui/packages/consul-ui/tests/integration/components/hcp-nav-item-test.js
delete mode 100644 ui/packages/consul-ui/tests/integration/components/link-to-hcp-banner-test.js
delete mode 100644 ui/packages/consul-ui/tests/integration/components/link-to-hcp-modal-test.js
delete mode 100644 ui/packages/consul-ui/tests/integration/helpers/hcp-authentication-link-test.js
delete mode 100644 ui/packages/consul-ui/tests/integration/helpers/hcp-resource-id-to-link-test.js
rename ui/packages/consul-ui/translations/components/{consul-copy-button => copy-button}/en-us.yaml (76%)
delete mode 100644 ui/packages/consul-ui/translations/components/hashicorp-consul/en-us.yaml
delete mode 100644 ui/packages/consul-ui/translations/components/link-to-hcp-banner/en-us.yaml
delete mode 100644 version/versiontest/versiontest.go
delete mode 100644 website/.husky/pre-commit
delete mode 100644 website/content/api-docs/acl/templated-policies.mdx
delete mode 100644 website/content/api-docs/exported-services.mdx
delete mode 100644 website/content/api-docs/hcp-link.mdx
delete mode 100644 website/content/commands/acl/templated-policy/index.mdx
delete mode 100644 website/content/commands/acl/templated-policy/list.mdx
delete mode 100644 website/content/commands/acl/templated-policy/preview.mdx
delete mode 100644 website/content/commands/acl/templated-policy/read.mdx
delete mode 100644 website/content/commands/peering/exported-services.mdx
delete mode 100644 website/content/commands/services/exported-services.mdx
create mode 100644 website/content/docs/agent/rpc.mdx
rename website/content/docs/{connect/gateways => }/api-gateway/configuration/gateway.mdx (95%)
rename website/content/docs/{connect/gateways => }/api-gateway/configuration/gatewayclass.mdx (95%)
rename website/content/docs/{connect/gateways => }/api-gateway/configuration/gatewayclassconfig.mdx (100%)
create mode 100644 website/content/docs/api-gateway/configuration/index.mdx
rename website/content/docs/{connect/gateways => }/api-gateway/configuration/meshservice.mdx (100%)
rename website/content/docs/{connect/gateways => }/api-gateway/configuration/routes.mdx (55%)
create mode 100644 website/content/docs/api-gateway/index.mdx
create mode 100644 website/content/docs/api-gateway/install.mdx
create mode 100644 website/content/docs/api-gateway/tech-specs.mdx
rename website/content/docs/{connect/gateways/api-gateway/upgrades-k8s.mdx => api-gateway/upgrades.mdx} (98%)
rename website/content/docs/{connect/gateways/api-gateway => api-gateway/usage}/errors.mdx (96%)
rename website/content/docs/{connect/gateways/api-gateway/define-routes => api-gateway/usage}/reroute-http-requests.mdx (66%)
rename website/content/docs/{connect/gateways/api-gateway/define-routes => api-gateway/usage}/route-to-peered-services.mdx (57%)
create mode 100644 website/content/docs/api-gateway/usage/usage.mdx
delete mode 100644 website/content/docs/architecture/catalog/v1.mdx
delete mode 100644 website/content/docs/architecture/catalog/v2.mdx
delete mode 100644 website/content/docs/connect/config-entries/http-route.mdx
rename website/content/docs/connect/{manage-traffic => }/failover/index.mdx (61%)
rename website/content/docs/connect/{config-entries => gateways/api-gateway/configuration}/api-gateway.mdx (52%)
delete mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/gatewaypolicy.mdx
create mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/index.mdx
rename website/content/docs/connect/{config-entries => gateways/api-gateway/configuration}/inline-certificate.mdx (97%)
delete mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/routeauthfilter.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/routeretryfilter.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/configuration/routetimeoutfilter.mdx
rename website/content/docs/connect/{config-entries => gateways/api-gateway/configuration}/tcp-route.mdx (98%)
delete mode 100644 website/content/docs/connect/gateways/api-gateway/define-routes/routes-k8s.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/define-routes/routes-vms.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/deploy/listeners-k8s.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/deploy/listeners-vms.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/install-k8s.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/secure-traffic/encrypt-vms.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-k8s.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/secure-traffic/verify-jwts-vms.mdx
delete mode 100644 website/content/docs/connect/gateways/api-gateway/tech-specs.mdx
create mode 100644 website/content/docs/connect/gateways/api-gateway/usage.mdx
rename website/content/docs/connect/{manage-traffic => l7-traffic}/discovery-chain.mdx (100%)
rename website/content/docs/connect/{manage-traffic => l7-traffic}/index.mdx (90%)
delete mode 100644 website/content/docs/connect/manage-traffic/failover/sameness.mdx
delete mode 100644 website/content/docs/connect/manage-traffic/limit-request-rates.mdx
delete mode 100644 website/content/docs/connect/manage-traffic/route-to-local-upstreams.mdx
delete mode 100644 website/content/docs/connect/proxies/envoy-extensions/configuration/otel-access-logging.mdx
delete mode 100644 website/content/docs/connect/proxies/envoy-extensions/usage/otel-access-logging.mdx
rename website/content/docs/ecs/{reference => }/compatibility.mdx (89%)
rename website/content/docs/ecs/{reference => }/configuration-reference.mdx (93%)
delete mode 100644 website/content/docs/ecs/deploy/bind-addresses.mdx
delete mode 100644 website/content/docs/ecs/deploy/configure-routes.mdx
delete mode 100644 website/content/docs/ecs/deploy/manual.mdx
delete mode 100644 website/content/docs/ecs/deploy/migrate-existing-tasks.mdx
delete mode 100644 website/content/docs/ecs/deploy/terraform.mdx
create mode 100644 website/content/docs/ecs/manual/acl-controller.mdx
create mode 100644 website/content/docs/ecs/manual/install.mdx
create mode 100644 website/content/docs/ecs/manual/secure-configuration.mdx
delete mode 100644 website/content/docs/ecs/reference/consul-server-json.mdx
create mode 100644 website/content/docs/ecs/requirements.mdx
create mode 100644 website/content/docs/ecs/task-resource-usage.mdx
delete mode 100644 website/content/docs/ecs/tech-specs.mdx
create mode 100644 website/content/docs/ecs/terraform/install.mdx
create mode 100644 website/content/docs/ecs/terraform/migrate-existing-tasks.mdx
create mode 100644 website/content/docs/ecs/terraform/secure-configuration.mdx
delete mode 100644 website/content/docs/ecs/upgrade-to-dataplanes.mdx
delete mode 100644 website/content/docs/enterprise/ent-to-ce-downgrades.mdx
delete mode 100644 website/content/docs/k8s/multiport/configure.mdx
delete mode 100644 website/content/docs/k8s/multiport/index.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/grpcroute.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/httproute.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/proxyconfiguration.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/resource-command.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/tcproute.mdx
delete mode 100644 website/content/docs/k8s/multiport/reference/trafficpermissions.mdx
delete mode 100644 website/content/docs/k8s/multiport/traffic-split.mdx
delete mode 100644 website/content/docs/release-notes/consul-ecs/v0_7_x.mdx
delete mode 100644 website/content/docs/release-notes/consul-k8s/v1_3_x.mdx
delete mode 100644 website/content/docs/release-notes/consul/v1_17_x.mdx
delete mode 100644 website/public/img/ecs/consul-on-ecs-architecture-dataplanes-dark.png
delete mode 100644 website/public/img/ecs/consul-on-ecs-architecture-dataplanes.png diff --git a/.changelog/17107.txt b/.changelog/17107.txt deleted file mode 100644 index 5694fca2c9cc8..0000000000000 --- a/.changelog/17107.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -api: RaftLeaderTransfer now requires an id string. An empty string can be specified to keep the old behavior. -``` diff --git a/.changelog/17155.txt b/.changelog/17155.txt deleted file mode 100644 index 03cec33e991af..0000000000000 --- a/.changelog/17155.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -config: Add new `tls.defaults.verify_server_hostname` configuration option. This specifies the default value for any interfaces that support the `verify_server_hostname` option. -``` diff --git a/.changelog/17481.txt b/.changelog/17481.txt deleted file mode 100644 index 89ad16998e836..0000000000000 --- a/.changelog/17481.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -tlsutil: Default setting of ServerName field in outgoing TLS configuration for checks now handled by crypto/tls. -``` diff --git a/.changelog/17593.txt b/.changelog/17593.txt deleted file mode 100644 index 1f84e75f57427..0000000000000 --- a/.changelog/17593.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -docs: fix list of telemetry metrics -``` diff --git a/.changelog/17694.txt b/.changelog/17694.txt deleted file mode 100644 index 703b100d1d3a3..0000000000000 --- a/.changelog/17694.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -Windows: support consul connect envoy command on Windows -``` diff --git a/.changelog/17754.txt b/.changelog/17754.txt index 32272ec1ae911..56ab20dc213ef 100644 --- a/.changelog/17754.txt +++ b/.changelog/17754.txt @@ -1,3 +1,3 @@ ```release-note:feature -ui: Display the Consul agent version in the nodes list, and allow filtering and sorting of nodes based on versions. -``` \ No newline at end of file +ui: consul version is displayed in nodes list with filtering and sorting based on versions +``` diff --git a/.changelog/17831.txt b/.changelog/17831.txt deleted file mode 100644 index 2833bda1d5765..0000000000000 --- a/.changelog/17831.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ca: Vault CA provider config no longer requires root_pki_path for secondary datacenters -``` diff --git a/.changelog/17936.txt b/.changelog/17936.txt deleted file mode 100644 index 61f5117d8712f..0000000000000 --- a/.changelog/17936.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -acl: Add new `acl.tokens.dns` config field which specifies the token used implicitly during dns checks. 
-``` diff --git a/.changelog/18007.txt b/.changelog/18007.txt deleted file mode 100644 index b963d2f77fcd6..0000000000000 --- a/.changelog/18007.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -Windows: Integration tests for Consul Windows VMs -``` diff --git a/.changelog/18300.txt b/.changelog/18300.txt deleted file mode 100644 index 717a697777e26..0000000000000 --- a/.changelog/18300.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -connect: update supported envoy versions to 1.24.10, 1.25.9, 1.26.4, 1.27.0 -``` diff --git a/.changelog/18303.txt b/.changelog/18303.txt new file mode 100644 index 0000000000000..4afc4473b7c90 --- /dev/null +++ b/.changelog/18303.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: update supported envoy versions to 1.23.12, 1.24.10, 1.25.9, 1.26.4 +``` diff --git a/.changelog/18324.txt b/.changelog/18324.txt deleted file mode 100644 index 6d1f11a92e969..0000000000000 --- a/.changelog/18324.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -api-gateway: add retry and timeout filters -``` \ No newline at end of file diff --git a/.changelog/18336.txt b/.changelog/18336.txt deleted file mode 100644 index 5d91046ec5efd..0000000000000 --- a/.changelog/18336.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:feature -xds: Add a built-in Envoy extension that appends OpenTelemetry Access Logging (otel-access-logging) to the HTTP Connection Manager filter. -``` - -```release-note:feature -xds: Add support for patching outbound listeners to the built-in Envoy External Authorization extension. -``` diff --git a/.changelog/18367.txt b/.changelog/18367.txt deleted file mode 100644 index 578cf7091853f..0000000000000 --- a/.changelog/18367.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -dev-mode: Fix dev mode has new line in responses. Now new line is added only when url has pretty query parameter. -``` diff --git a/.changelog/18439.txt b/.changelog/18439.txt deleted file mode 100644 index dd12738d5c387..0000000000000 --- a/.changelog/18439.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -Support custom watches on the Consul Controller framework. 
-``` diff --git a/.changelog/18504.txt b/.changelog/18504.txt deleted file mode 100644 index 6df042237a93c..0000000000000 --- a/.changelog/18504.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -dataplane: Allow getting bootstrap parameters when using V2 APIs -``` \ No newline at end of file diff --git a/.changelog/18560.txt b/.changelog/18560.txt deleted file mode 100644 index 118fad95306d7..0000000000000 --- a/.changelog/18560.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Use Community verbiage -``` \ No newline at end of file diff --git a/.changelog/18573.txt b/.changelog/18573.txt deleted file mode 100644 index ce03f1c55baf7..0000000000000 --- a/.changelog/18573.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:enhancement -xds: Use downstream protocol when connecting to local app -``` diff --git a/.changelog/18583.txt b/.changelog/18583.txt deleted file mode 100644 index 8121e01ced0db..0000000000000 --- a/.changelog/18583.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -mesh: **(Enterprise only)** Adds rate limiting config to service-defaults -``` diff --git a/.changelog/18646.txt b/.changelog/18646.txt deleted file mode 100644 index 93da75819901a..0000000000000 --- a/.changelog/18646.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -api-gateway: Add support for response header modifiers on http-route configuration entry -``` diff --git a/.changelog/18668.txt b/.changelog/18668.txt deleted file mode 100644 index fdd63e1c6d4b6..0000000000000 --- a/.changelog/18668.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -audit-logging: **(Enterprise only)** allowing timestamp based filename only on rotation. initially the filename will be just file.json -``` diff --git a/.changelog/18708.txt b/.changelog/18708.txt deleted file mode 100644 index 66a34da789703..0000000000000 --- a/.changelog/18708.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:feature -acl: Added ACL Templated policies to simplify getting the right ACL token. -``` - -```release-note:improvement -cli: Added `-templated-policy`, `-templated-policy-file`, `-replace-templated-policy`, `-append-templated-policy`, `-replace-templated-policy-file`, `-append-templated-policy-file` and `-var` flags for creating or updating tokens/roles. -``` diff --git a/.changelog/18719.txt b/.changelog/18719.txt deleted file mode 100644 index 4da370b91b90f..0000000000000 --- a/.changelog/18719.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:feature -acl: Add BindRule support for templated policies. Add new BindType: templated-policy and BindVar field for templated policy variables. -``` - -```release-note:feature -cli: Add `bind-var` flag to `consul acl binding-rule` for templated policy variables. 
-``` \ No newline at end of file diff --git a/.changelog/18769.txt b/.changelog/18769.txt deleted file mode 100644 index b9cf8aa207465..0000000000000 --- a/.changelog/18769.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -acl: Adds a new ACL rule for workload identities -``` diff --git a/.changelog/18813.txt b/.changelog/18813.txt deleted file mode 100644 index 3dcb4612915b0..0000000000000 --- a/.changelog/18813.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -acl: Use templated policy to generate synthetic policies for tokens/roles with node and/or service identities -``` \ No newline at end of file diff --git a/.changelog/18816.txt b/.changelog/18816.txt deleted file mode 100644 index ef8989ee705b3..0000000000000 --- a/.changelog/18816.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -cli: Add `consul acl templated-policy` commands to read, list and preview templated policies. -``` \ No newline at end of file diff --git a/.changelog/18943.txt b/.changelog/18943.txt deleted file mode 100644 index adb027dee9fd9..0000000000000 --- a/.changelog/18943.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: added `CheckRegisterOpts` to Agent API -``` diff --git a/.changelog/18983.txt b/.changelog/18983.txt deleted file mode 100644 index 8a49a850eb356..0000000000000 --- a/.changelog/18983.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: added `Token` field to `ServiceRegisterOpts` type in Agent API -``` diff --git a/.changelog/18994.txt b/.changelog/18994.txt deleted file mode 100644 index 3f80f34576716..0000000000000 --- a/.changelog/18994.txt +++ /dev/null @@ -1,20 +0,0 @@ -```release-note:feature -# Catalog v2 feature preview -This release provides the ability to preview Consul's v2 Catalog and Resource API if enabled. The new model supports -multi-port application deployments with only a single Envoy proxy. Note that the v1 and v2 catalogs are not cross -compatible, and not all Consul features are available within this v2 feature preview. See the [v2 Catalog and Resource -API documentation](https://developer.hashicorp.com/consul/docs/architecture/v2) for more information. The v2 Catalog and -Resources API should be considered a feature preview within this release and should not be used in production -environments. - -### Limitations -* The v2 catalog API feature preview does not support connections with client agents. As a result, it is only available for Kubernetes deployments, which use [Consul dataplanes](consul/docs/connect/dataplane) instead of client agents. -* The v1 and v2 catalog APIs cannot run concurrently. -* The Consul UI does not support multi-port services or the v2 catalog API in this release. -* HCP Consul does not support multi-port services or the v2 catalog API in this release. 
- -[[Catalog resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/catalog/internal/controllers) -[[Mesh resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/mesh/internal/controllers) -[[Auth resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/auth/internal) -[[V2 Protobufs]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/proto-public) -``` \ No newline at end of file diff --git a/.changelog/19077.txt b/.changelog/19077.txt deleted file mode 100644 index d7c9b0483c7c4..0000000000000 --- a/.changelog/19077.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -acl: Adds workload identity templated policy -``` diff --git a/.changelog/19120.txt b/.changelog/19120.txt new file mode 100644 index 0000000000000..e7e5d3274fa1d --- /dev/null +++ b/.changelog/19120.txt @@ -0,0 +1,3 @@ +```release-note:bug +api-gateway: fix matching for different hostnames on the same listener +``` diff --git a/.changelog/19218.txt b/.changelog/19218.txt deleted file mode 100644 index a3dde32317b47..0000000000000 --- a/.changelog/19218.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -resource: lowercase names enforced for v2 resources only. -``` \ No newline at end of file diff --git a/.changelog/19273.txt b/.changelog/19273.txt new file mode 100644 index 0000000000000..b5264e101980b --- /dev/null +++ b/.changelog/19273.txt @@ -0,0 +1,3 @@ +```release-note:security +connect: update supported envoy versions to 1.24.12, 1.25.11, 1.26.6 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) +``` diff --git a/.changelog/19306.txt b/.changelog/19306.txt deleted file mode 100644 index 81c0b638b9587..0000000000000 --- a/.changelog/19306.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -connect: update supported envoy versions to 1.24.12, 1.25.11, 1.26.6, 1.27.2 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) -``` diff --git a/.changelog/19311.txt b/.changelog/19311.txt deleted file mode 100644 index e53536f44d32b..0000000000000 --- a/.changelog/19311.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -raft: Fix panic during downgrade from enterprise to oss. -``` \ No newline at end of file diff --git a/.changelog/19314.txt b/.changelog/19314.txt deleted file mode 100644 index c5f1346f3bfe0..0000000000000 --- a/.changelog/19314.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:enhancement -raft: upgrade raft-wal library version to 0.4.1. -``` diff --git a/.changelog/19342.txt b/.changelog/19342.txt deleted file mode 100644 index ac8559fd482dd..0000000000000 --- a/.changelog/19342.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -Replaces UI Side Nav with Helios Design System Side Nav. Adds dc/partition/namespace searching in Side Nav. -``` diff --git a/.changelog/19389.txt b/.changelog/19389.txt deleted file mode 100644 index 1fe521b853812..0000000000000 --- a/.changelog/19389.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cli: stop simultaneous usage of -templated-policy and -templated-policy-file when creating a role or token. 
-``` \ No newline at end of file diff --git a/.changelog/19443.txt b/.changelog/19443.txt new file mode 100644 index 0000000000000..6541c16938202 --- /dev/null +++ b/.changelog/19443.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: only show hcp link if url is present +``` diff --git a/.changelog/19499.txt b/.changelog/19499.txt deleted file mode 100644 index 83849637d4138..0000000000000 --- a/.changelog/19499.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -acl: add policy bindtype to binding rules. -``` \ No newline at end of file diff --git a/.changelog/19549.txt b/.changelog/19549.txt deleted file mode 100644 index 752dc3f8728ec..0000000000000 --- a/.changelog/19549.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: clear peer on home logo link -``` diff --git a/.changelog/19586.txt b/.changelog/19586.txt deleted file mode 100644 index 2c58b38b55785..0000000000000 --- a/.changelog/19586.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: fix being able to view peered services from non-default namnespaces -``` diff --git a/.changelog/19594.txt b/.changelog/19594.txt deleted file mode 100644 index 9348b98bf5046..0000000000000 --- a/.changelog/19594.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: move nspace and partitions requests into their selector menus -``` diff --git a/.changelog/19647.txt b/.changelog/19647.txt deleted file mode 100644 index 33b91ef01cbd4..0000000000000 --- a/.changelog/19647.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -connect: Add `CaseInsensitive` flag to service-routers that allows paths and path prefixes to ignore URL upper and lower casing. -``` diff --git a/.changelog/19666.txt b/.changelog/19666.txt deleted file mode 100644 index c3880be4d10af..0000000000000 --- a/.changelog/19666.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: Add support for listing ACL tokens by service name when using templated policies. -``` \ No newline at end of file diff --git a/.changelog/19728.txt b/.changelog/19728.txt deleted file mode 100644 index 53c61bc5e0e85..0000000000000 --- a/.changelog/19728.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -acl: add api-gateway templated policy -``` \ No newline at end of file diff --git a/.changelog/19735.txt b/.changelog/19735.txt deleted file mode 100644 index b7a712ced6e96..0000000000000 --- a/.changelog/19735.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -acl: add templated policy descriptions -``` \ No newline at end of file diff --git a/.changelog/19821.txt b/.changelog/19821.txt deleted file mode 100644 index ee88046faa061..0000000000000 --- a/.changelog/19821.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -cli: Adds new subcommand `peering exported-services` to list services exported to a peer . Refer to the [CLI docs](https://developer.hashicorp.com/consul/commands/peering) for more information. 
-``` diff --git a/.changelog/19827.txt b/.changelog/19827.txt deleted file mode 100644 index 6a3d9a7ec8270..0000000000000 --- a/.changelog/19827.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -acl: Adds nomad client templated policy -``` diff --git a/.changelog/19879.txt b/.changelog/19879.txt deleted file mode 100644 index 12e7e2d75f8fb..0000000000000 --- a/.changelog/19879.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -mesh: update supported envoy version 1.28.0 in addition to 1.25.11, 1.26.6, 1.27.2, 1.28.0 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) -``` \ No newline at end of file diff --git a/.changelog/19907.txt b/.changelog/19907.txt deleted file mode 100644 index 905794114fbf4..0000000000000 --- a/.changelog/19907.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: stop manually reconciling services if peering is enabled -``` diff --git a/.changelog/19940.txt b/.changelog/19940.txt index b83ba176e6dc1..2b314e93918a1 100644 --- a/.changelog/19940.txt +++ b/.changelog/19940.txt @@ -1,3 +1,3 @@ ```release-note:improvement -connect: Replace usage of deprecated Envoy fields `envoy.config.cluster.v3.Cluster.http2_protocol_options` and `envoy.config.bootstrap.v3.Admin.access_log_path`. +xds: remove usages of deprecated Envoy fields: `envoy.config.cluster.v3.Cluster.http2_protocol_options`, `envoy.config.bootstrap.v3.Admin.access_log_path` ``` \ No newline at end of file diff --git a/.changelog/19943.txt b/.changelog/19943.txt deleted file mode 100644 index fff1b52d6c100..0000000000000 --- a/.changelog/19943.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:deprecation -cli: Deprecate the `-admin-access-log-path` flag from `consul connect envoy` command in favor of: `-admin-access-log-config`. -``` diff --git a/.changelog/19992.txt b/.changelog/19992.txt deleted file mode 100644 index 58c26440f74ef..0000000000000 --- a/.changelog/19992.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -config-entries: Allow disabling request and idle timeouts with negative values in service router and service resolver config entries. -``` diff --git a/.changelog/20010.txt b/.changelog/20010.txt index 1ae89d88b70b1..c9e957cc43747 100644 --- a/.changelog/20010.txt +++ b/.changelog/20010.txt @@ -1,3 +1,3 @@ ```release-note:improvement -connect: Replace usage of deprecated Envoy field `envoy.config.cluster.v3.Cluster.http_protocol_options`. +xds: Replace usage of deprecated Envoy field `envoy.config.cluster.v3.Cluster.http_protocol_options` ``` \ No newline at end of file diff --git a/.changelog/20011.txt b/.changelog/20011.txt index 12a70a84f2f74..e20c69508bdc0 100644 --- a/.changelog/20011.txt +++ b/.changelog/20011.txt @@ -1,3 +1,3 @@ ```release-note:improvement -connect: Replace usage of deprecated Envoy field `envoy.config.router.v3.WeightedCluster.total_weight`. +connect: replace usage of deprecated Envoy field `envoy.config.router.v3.WeightedCluster.total_weight`. ``` \ No newline at end of file diff --git a/.changelog/20012.txt b/.changelog/20012.txt index cf1b26b09217b..6f9cc45556518 100644 --- a/.changelog/20012.txt +++ b/.changelog/20012.txt @@ -1,3 +1,3 @@ ```release-note:improvement -connect: Replace usage of deprecated Envoy field `envoy.extensions.filters.http.lua.v3.Lua.inline_code`. 
+xds: replace usage of deprecated Envoy field `envoy.extensions.filters.http.lua.v3.Lua.inline_code` ``` diff --git a/.changelog/20013.txt b/.changelog/20013.txt deleted file mode 100644 index d9c8349f97fec..0000000000000 --- a/.changelog/20013.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -connect: Replace usage of deprecated Envoy fields `envoy.config.route.v3.HeaderMatcher.safe_regex_match` and `envoy.type.matcher.v3.RegexMatcher.google_re2`. -``` \ No newline at end of file diff --git a/.changelog/20015.txt b/.changelog/20015.txt deleted file mode 100644 index 7ca2b7b04363c..0000000000000 --- a/.changelog/20015.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -api: add a new api(/v1/exported-services) to list all the exported service and their consumers. -``` \ No newline at end of file diff --git a/.changelog/20023.txt b/.changelog/20023.txt deleted file mode 100644 index 812573bf97e3f..0000000000000 --- a/.changelog/20023.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -Update `golang.org/x/crypto` to v0.17.0 to address [CVE-2023-48795](https://nvd.nist.gov/vuln/detail/CVE-2023-48795). -``` diff --git a/.changelog/20078.txt b/.changelog/20078.txt deleted file mode 100644 index 00fd03f486021..0000000000000 --- a/.changelog/20078.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -connect: Replace usage of deprecated Envoy field `envoy.config.core.v3.HeaderValueOption.append`. -``` diff --git a/.changelog/20111.txt b/.changelog/20111.txt deleted file mode 100644 index ae1c31ac5b993..0000000000000 --- a/.changelog/20111.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -UI: fix autofocus on search box when page refreshes on typing in it due to url change. -``` diff --git a/.changelog/20220.txt b/.changelog/20220.txt deleted file mode 100644 index aca1a80961958..0000000000000 --- a/.changelog/20220.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -cloud: unconditionally add Access-Control-Expose-Headers HTTP header -``` diff --git a/.changelog/20275.txt b/.changelog/20275.txt deleted file mode 100644 index e82fc542bcacd..0000000000000 --- a/.changelog/20275.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -ui: Added a banner to let users link their clusters to HCP -``` \ No newline at end of file diff --git a/.changelog/20299.txt b/.changelog/20299.txt deleted file mode 100644 index b1c1538e674b2..0000000000000 --- a/.changelog/20299.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -v2: prevent use of the v2 experiments in secondary datacenters for now -``` diff --git a/.changelog/20308.txt b/.changelog/20308.txt deleted file mode 100644 index 3dade6fb0dbd2..0000000000000 --- a/.changelog/20308.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -docs: add Link API documentation -``` diff --git a/.changelog/20312.txt b/.changelog/20312.txt deleted file mode 100644 index f42c4f17becd9..0000000000000 --- a/.changelog/20312.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:feature -cloud: Adds new API/CLI to initiate and manage linking a Consul cluster to HCP Consul Central -``` -```release-note:breaking-change -telemetry: Adds fix to always use the value of `telemetry.disable_hostname` when determining whether to prefix gauge-type metrics with the hostname of the Consul agent. Previously, if only the default metric sink was enabled, this configuration was ignored and always treated as `true`, even though its default value is `false`. 
-``` \ No newline at end of file diff --git a/.changelog/20331.txt b/.changelog/20331.txt deleted file mode 100644 index 245e0eda693a6..0000000000000 --- a/.changelog/20331.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -cli: Adds new command `exported-services` to list all services exported and their consumers. Refer to the [CLI docs](https://developer.hashicorp.com/consul/commands/exported-services) for more information. -``` diff --git a/.changelog/20352.txt b/.changelog/20352.txt deleted file mode 100644 index d9faed27af678..0000000000000 --- a/.changelog/20352.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -logging: add /api prefix to v2 resource endpoint logs -``` \ No newline at end of file diff --git a/.changelog/20353.txt b/.changelog/20353.txt deleted file mode 100644 index d1d8f4ebc2070..0000000000000 --- a/.changelog/20353.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -ui: adds V2CatalogEnabled to config that is passed to the ui -``` diff --git a/.changelog/20359.txt b/.changelog/20359.txt deleted file mode 100644 index c010b32ef5793..0000000000000 --- a/.changelog/20359.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -ui: Adds a redirect and warning message around unavailable UI with V2 enabled -``` diff --git a/.changelog/20474.txt b/.changelog/20474.txt deleted file mode 100644 index 8c0d514b866d7..0000000000000 --- a/.changelog/20474.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -ui: Adds a "Link to HCP Consul Central" modal with integration to side-nav and link to HCP banner. There will be an option to disable the Link to HCP banner from the UI in a follow-up release. -``` \ No newline at end of file diff --git a/.changelog/20514.txt b/.changelog/20514.txt deleted file mode 100644 index 45eedfd82c2a3..0000000000000 --- a/.changelog/20514.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -hcp: fix error logs when failing to push metrics -``` diff --git a/.changelog/20544.txt b/.changelog/20544.txt deleted file mode 100644 index bfe3cbd6ff9d6..0000000000000 --- a/.changelog/20544.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -agent: Introduces a new agent config default_intention_policy to decouple the default intention behavior from ACLs -``` diff --git a/.changelog/20586.txt b/.changelog/20586.txt new file mode 100644 index 0000000000000..107db1f921136 --- /dev/null +++ b/.changelog/20586.txt @@ -0,0 +1,3 @@ +```release-note:security +mesh: Update Envoy version to 1.26.7 to address [CVE-2024-23324](https://github.com/envoyproxy/envoy/security/advisories/GHSA-gq3v-vvhj-96j6), [CVE-2024-23325](https://github.com/envoyproxy/envoy/security/advisories/GHSA-5m7c-mrwr-pm26), [CVE-2024-23322](https://github.com/envoyproxy/envoy/security/advisories/GHSA-6p83-mfmh-qv38), [CVE-2024-23323](https://github.com/envoyproxy/envoy/security/advisories/GHSA-x278-4w4x-r7ch), [CVE-2024-23327](https://github.com/envoyproxy/envoy/security/advisories/GHSA-4h5x-x9vh-m29j), and [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) +``` diff --git a/.changelog/20589.txt b/.changelog/20589.txt deleted file mode 100644 index 533dc4cc4c392..0000000000000 --- a/.changelog/20589.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -mesh: Update Envoy versions to 1.28.1, 1.27.3, and 1.26.7 to address [CVE-2024-23324](https://github.com/envoyproxy/envoy/security/advisories/GHSA-gq3v-vvhj-96j6), 
[CVE-2024-23325](https://github.com/envoyproxy/envoy/security/advisories/GHSA-5m7c-mrwr-pm26), [CVE-2024-23322](https://github.com/envoyproxy/envoy/security/advisories/GHSA-6p83-mfmh-qv38), [CVE-2024-23323](https://github.com/envoyproxy/envoy/security/advisories/GHSA-x278-4w4x-r7ch), [CVE-2024-23327](https://github.com/envoyproxy/envoy/security/advisories/GHSA-4h5x-x9vh-m29j), and [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) -``` diff --git a/.changelog/20642.txt b/.changelog/20642.txt deleted file mode 100644 index 0f224654cbd8f..0000000000000 --- a/.changelog/20642.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:bug -server: Ensure internal streams are properly terminated on snapshot restore. -``` - -```release-note:bug -server: Ensure controllers are automatically restarted on internal stream errors. -``` diff --git a/.changelog/20643.txt b/.changelog/20643.txt deleted file mode 100644 index 5c16c8784d441..0000000000000 --- a/.changelog/20643.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:feature -dns: adds experimental support for a refactored DNS server that is v1 and v2 Catalog compatible. -Use `v2dns` in the `experiments` agent config to enable. -It will automatically be enabled when using the `resource-apis` (Catalog v2) experiment. -The new DNS implementation will be the default in Consul 1.19. -See the [Consul 1.18.x Release Notes](https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_18_x) for deprecated DNS features. -``` diff --git a/.changelog/20679.txt b/.changelog/20679.txt deleted file mode 100644 index 0efb6b276336e..0000000000000 --- a/.changelog/20679.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -dns: SERVFAIL when resolving not found PTR records. -``` \ No newline at end of file diff --git a/.changelog/_18366.txt b/.changelog/_18366.txt deleted file mode 100644 index 02a3599c2c27c..0000000000000 --- a/.changelog/_18366.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -config-entry(api-gateway): (Enterprise only) Add GatewayPolicy to APIGateway Config Entry listeners -``` diff --git a/.changelog/_18422.txt b/.changelog/_18422.txt deleted file mode 100644 index 4f0efbbe3b278..0000000000000 --- a/.changelog/_18422.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -config-entry(api-gateway): (Enterprise only) Add JWTFilter to HTTPRoute Filters -``` diff --git a/.changelog/_20721.txt b/.changelog/_20721.txt new file mode 100644 index 0000000000000..e8f45bd280d68 --- /dev/null +++ b/.changelog/_20721.txt @@ -0,0 +1,3 @@ +```release-note:bug +ingress-gateway: **(Enterprise Only)** Fix a bug where on update, Ingress Gateways lost all upstreams for listeners with wildcard services in a different namespace. +``` \ No newline at end of file diff --git a/.changelog/_6074.txt b/.changelog/_6074.txt deleted file mode 100644 index 6539fa6a4fa19..0000000000000 --- a/.changelog/_6074.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -connect: **(Enterprise only)** Fix bug where incorrect service-defaults entries were fetched to determine an upstream's protocol whenever the upstream did not explicitly define the namespace / partition. When this bug occurs, upstreams would use the protocol from a service-default entry in the default namespace / partition, rather than their own namespace / partition. 
-``` diff --git a/.changelog/_6870.txt b/.changelog/_6870.txt deleted file mode 100644 index b4c52f7d67554..0000000000000 --- a/.changelog/_6870.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -gateway: **(Enterprise only)** Add JWT authentication and authorization to APIGateway Listeners and HTTPRoutes. -``` diff --git a/.copywrite.hcl b/.copywrite.hcl index 1df5f1dce9d95..3fb2004de5641 100644 --- a/.copywrite.hcl +++ b/.copywrite.hcl @@ -19,16 +19,14 @@ project { # ignore specific test data files "agent/uiserver/testdata/**", - "internal/resourcehcl/testdata/**", # generated files "agent/structs/structs.deepcopy.go", "agent/proxycfg/proxycfg.deepcopy.go", "agent/grpc-middleware/rate_limit_mappings.gen.go", "agent/uiserver/dist/**", - - # ignoring policy embedded files - "agent/structs/acltemplatedpolicy/policies/ce/**", + "agent/consul/state/catalog_schema.deepcopy.go", + "agent/config/config.deepcopy.go", # licensed under MPL - ignoring for now until the copywrite tool can support # multiple licenses per repo. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 16480bb6f4f94..36f9456693ff3 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 blank_issues_enabled: false contact_links: diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d618028e100bd..05387a7c9b803 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 version: 2 updates: diff --git a/.github/pr-labeler.yml b/.github/pr-labeler.yml index fd39f2ccab423..e10f3c1376ef0 100644 --- a/.github/pr-labeler.yml +++ b/.github/pr-labeler.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 pr/dependencies: - vendor/**/* diff --git a/.github/scripts/changelog_checker.sh b/.github/scripts/changelog_checker.sh index a214ef2477979..e6b4d7f85dcc0 100755 --- a/.github/scripts/changelog_checker.sh +++ b/.github/scripts/changelog_checker.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/scripts/get_runner_classes.sh b/.github/scripts/get_runner_classes.sh index 603ed20ec7257..2e66a4e344c6c 100755 --- a/.github/scripts/get_runner_classes.sh +++ b/.github/scripts/get_runner_classes.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 # # This script generates tag-sets that can be used as runs-on: values to select runners. diff --git a/.github/scripts/get_runner_classes_windows.sh b/.github/scripts/get_runner_classes_windows.sh deleted file mode 100755 index ae75625974cf2..0000000000000 --- a/.github/scripts/get_runner_classes_windows.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# -# This script generates tag-sets that can be used as runs-on: values to select runners. 
- -set -euo pipefail - -case "$GITHUB_REPOSITORY" in -*-enterprise) - # shellcheck disable=SC2129 - echo "compute-small=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.2xlarge']" >>"$GITHUB_OUTPUT" - echo "compute-medium=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.4xlarge']" >>"$GITHUB_OUTPUT" - echo "compute-large=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.8xlarge']" >>"$GITHUB_OUTPUT" - # m5d.8xlarge is equivalent to our xl custom runner in CE - echo "compute-xl=['self-hosted', 'ondemand', 'os=windows-2019', 'type=m6a.12xlarge']" >>"$GITHUB_OUTPUT" - ;; -*) - # shellcheck disable=SC2129 - echo "compute-small=['windows-2019']" >>"$GITHUB_OUTPUT" - echo "compute-medium=['windows-2019']" >>"$GITHUB_OUTPUT" - echo "compute-large=['windows-2019']" >>"$GITHUB_OUTPUT" - echo "compute-xl=['windows-2019']" >>"$GITHUB_OUTPUT" - ;; -esac diff --git a/.github/scripts/metrics_checker.sh b/.github/scripts/metrics_checker.sh index 37659de4df8bd..a34cdf12fbec3 100755 --- a/.github/scripts/metrics_checker.sh +++ b/.github/scripts/metrics_checker.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -uo pipefail diff --git a/.github/scripts/notify_slack.sh b/.github/scripts/notify_slack.sh new file mode 100755 index 0000000000000..eacefaa91a439 --- /dev/null +++ b/.github/scripts/notify_slack.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -uo pipefail + +# This script is used in GitHub Actions pipelines to notify Slack of a job failure. + +if [[ $GITHUB_REF_NAME == "main" ]]; then + GITHUB_ENDPOINT="https://github.com/${GITHUB_REPOSITORY}/commit/${GITHUB_SHA}" + GITHUB_ACTIONS_ENDPOINT="https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" + COMMIT_MESSAGE=$(git log -1 --pretty=%B | head -n1) + SHORT_REF=$(git rev-parse --short "${GITHUB_SHA}") + curl -X POST -H 'Content-type: application/json' \ + --data \ + "{ \ + \"attachments\": [ \ + { \ + \"fallback\": \"GitHub Actions workflow failed!\", \ + \"text\": \"❌ Failed: \`${GITHUB_ACTOR}\`'s <${GITHUB_ACTIONS_ENDPOINT}|${GITHUB_JOB}> job failed for commit <${GITHUB_ENDPOINT}|${SHORT_REF}> on \`${GITHUB_REF_NAME}\`!\n\n- <${COMMIT_MESSAGE}\", \ + \"footer\": \"${GITHUB_REPOSITORY}\", \ + \"ts\": \"$(date +%s)\", \ + \"color\": \"danger\" \ + } \ + ] \ + }" "${FEED_CONSUL_GH_URL}" +else + echo "Not posting slack failure notifications for non-main branch" +fi diff --git a/.github/scripts/rerun_fails_report.sh b/.github/scripts/rerun_fails_report.sh index 90bae7a03a59d..ac6b7cf2ff9da 100755 --- a/.github/scripts/rerun_fails_report.sh +++ b/.github/scripts/rerun_fails_report.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 # # Add a comment on the github PR if there were any rerun tests. diff --git a/.github/scripts/set_test_package_matrix.sh b/.github/scripts/set_test_package_matrix.sh index 73ef720019332..3de3f83138a0b 100755 --- a/.github/scripts/set_test_package_matrix.sh +++ b/.github/scripts/set_test_package_matrix.sh @@ -1,19 +1,11 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail export RUNNER_COUNT=$1 -if ((RUNNER_COUNT < 2 )) -then - echo ERROR: RUNNER_COUNT must be greater than 1. 
Otherwise use the non-split unit test cod in .github/workflows/reusable-unit.yml. - exit 1 # terminate and indicate error -else - EFFECTIVE_RUNNER_COUNT=$((RUNNER_COUNT-1)) -fi - # set matrix var to list of unique packages containing tests -matrix="$(go list -json="ImportPath,TestGoFiles" ./... | jq --compact-output '. | select(.TestGoFiles != null) | select(.ImportPath != "github.com/hashicorp/consul/agent") | .ImportPath' | shuf | jq --slurp --compact-output '.' | jq --argjson runnercount $EFFECTIVE_RUNNER_COUNT -cM '[_nwise(length / $runnercount | ceil)]' | jq --compact-output '. += [["github.com/hashicorp/consul/agent"]]'))" +matrix="$(go list -json="ImportPath,TestGoFiles" ./... | jq --compact-output '. | select(.TestGoFiles != null) | .ImportPath' | shuf | jq --slurp --compact-output '.' | jq --argjson runnercount $RUNNER_COUNT -cM '[_nwise(length / $runnercount | floor)]'))" echo "matrix=${matrix}" >> "${GITHUB_OUTPUT}" diff --git a/.github/scripts/verify_artifact.sh b/.github/scripts/verify_artifact.sh index 3aa9e0848dfb3..48bfede1cb33f 100755 --- a/.github/scripts/verify_artifact.sh +++ b/.github/scripts/verify_artifact.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/scripts/verify_bin.sh b/.github/scripts/verify_bin.sh index dc5ac9f9f0e34..ff572d87fae9e 100755 --- a/.github/scripts/verify_bin.sh +++ b/.github/scripts/verify_bin.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/scripts/verify_deb.sh b/.github/scripts/verify_deb.sh index c6b6926c5d1a2..84a9a10c85639 100755 --- a/.github/scripts/verify_deb.sh +++ b/.github/scripts/verify_deb.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/scripts/verify_docker.sh b/.github/scripts/verify_docker.sh index ea9180920f8ae..fbbeb7dff464a 100755 --- a/.github/scripts/verify_docker.sh +++ b/.github/scripts/verify_docker.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/scripts/verify_envoy_version.sh b/.github/scripts/verify_envoy_version.sh index 4bcbea4cabeeb..bfbd969d4baab 100755 --- a/.github/scripts/verify_envoy_version.sh +++ b/.github/scripts/verify_envoy_version.sh @@ -1,10 +1,10 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail -current_branch=$GITHUB_REF_NAME +current_branch=$GITHUB_REF GITHUB_DEFAULT_BRANCH='main' if [ -z "$GITHUB_TOKEN" ]; then @@ -13,7 +13,7 @@ if [ -z "$GITHUB_TOKEN" ]; then fi if [ -z "$current_branch" ]; then - echo "GITHUB_REF_NAME must be set" + echo "GITHUB_REF must be set" exit 1 fi @@ -81,6 +81,7 @@ released_envoy_version=$(get_latest_envoy_version) major_released_envoy_version="${released_envoy_version[@]:1:4}" validate_envoy_version_main(){ + echo "verify "main" GitHub branch has latest envoy version" # Get envoy version for current branch ENVOY_VERSIONS=$(sanitize_consul_envoy_version | awk '{print $2}' | tr ',' ' ') envoy_version_main_branch=$(get_major_version ${ENVOY_VERSIONS}) @@ -122,8 +123,8 @@ echo checking out branch: "${current_branch}" git checkout "${current_branch}" echo -echo "Branch ${current_branch} => Consul version: ${CONSUL_VERSION}; Envoy Version: ${ENVOY_VERSIONS}" -echo "Branch ${GITHUB_DEFAULT_BRANCH} => Consul version: ${CONSUL_VERSION_DEFAULT_BRANCH}; Envoy Version: ${ENVOY_VERSIONS_DEFAULT_BRANCH}" +echo "Branch ${current_branch} =>Consul version: ${CONSUL_VERSION}; Envoy Version: ${ENVOY_VERSIONS}" +echo "Branch ${GITHUB_DEFAULT_BRANCH} =>Consul version: ${CONSUL_VERSION_DEFAULT_BRANCH}; Envoy Version: ${ENVOY_VERSIONS_DEFAULT_BRANCH}" ## Get major Consul and Envoy versions on release and default branch MAJOR_CONSUL_VERSION=$(get_major_version ${CONSUL_VERSION}) diff --git a/.github/scripts/verify_rpm.sh b/.github/scripts/verify_rpm.sh index 96cd658eef3d5..17709a1d90e1b 100755 --- a/.github/scripts/verify_rpm.sh +++ b/.github/scripts/verify_rpm.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -euo pipefail diff --git a/.github/workflows/backport-assistant.yml b/.github/workflows/backport-assistant.yml index 78125b6dabed6..17462f811261c 100644 --- a/.github/workflows/backport-assistant.yml +++ b/.github/workflows/backport-assistant.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Run Backport Assistant for release branches run: | - backport-assistant backport -merge-method=squash + backport-assistant backport -merge-method=squash -gh-automerge env: BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+)" BACKPORT_TARGET_TEMPLATE: "release/{{.target}}.x" diff --git a/.github/workflows/bot-auto-approve.yaml b/.github/workflows/bot-auto-approve.yaml index 911fc27f46968..2b652388999c2 100644 --- a/.github/workflows/bot-auto-approve.yaml +++ b/.github/workflows/bot-auto-approve.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest if: github.actor == 'hc-github-team-consul-core' steps: - - uses: hmarr/auto-approve-action@44888193675f29a83e04faf4002fa8c0b537b1e4 # v3.2.1 + - uses: hmarr/auto-approve-action@v3 with: review-message: "Auto approved Consul Bot automated PR" github-token: ${{ secrets.MERGE_APPROVE_TOKEN }} diff --git a/.github/workflows/broken-link-check.yml b/.github/workflows/broken-link-check.yml index d3dddccae7951..b7c89ff3e75dc 100644 --- a/.github/workflows/broken-link-check.yml +++ b/.github/workflows/broken-link-check.yml @@ -6,17 +6,17 @@ name: Broken Link Check on: workflow_dispatch: schedule: - - cron: "0 0 1 * *" + - cron: "0 0 * * 1" jobs: linkChecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v3 - name: Run lychee link checker id: lychee - uses: 
lycheeverse/lychee-action@ec3ed119d4f44ad2673a7232460dc7dff59d2421 # v1.8.0 + uses: lycheeverse/lychee-action@v1.6.1 with: args: ./website/content/docs/ --base https://developer.hashicorp.com/ --exclude-all-private --exclude '\.(svg|gif|jpg|png)' --exclude 'manage\.auth0\.com' --accept 403 --max-concurrency=24 --no-progress --verbose # Fail GitHub action when broken links are found? @@ -26,8 +26,8 @@ jobs: - name: Create GitHub Issue From lychee output file if: env.lychee_exit_code != 0 - uses: peter-evans/create-issue-from-file@433e51abf769039ee20ba1293a088ca19d573b7f # v4.0.1 + uses: peter-evans/create-issue-from-file@v4 with: title: Link Checker Report content-filepath: ./lychee/out.md - labels: report, automated issue + labels: report, automated issue \ No newline at end of file diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml index 3c4fb7e669adc..e9fdf192fb2e2 100644 --- a/.github/workflows/build-artifacts.yml +++ b/.github/workflows/build-artifacts.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + GOPRIVATE: github.com/hashicorp jobs: setup: @@ -25,7 +25,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -61,14 +61,14 @@ jobs: kv/data/github/${{ github.repository }}/dockerhub username | DOCKERHUB_USERNAME; kv/data/github/${{ github.repository }}/dockerhub token | DOCKERHUB_TOKEN; - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: ENT specific step as we need to set elevated GitHub permissions. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version: ${{ needs.get-go-version.outputs.go-version }} @@ -83,17 +83,17 @@ jobs: echo "GITHUB_BUILD_URL=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV - name: Set up Docker Buildx - uses: docker/setup-buildx-action@2a1a44ac4aa01993040736bd95bb470da1a38365 # v2.9.0 + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # pin@v2.4.1 # NOTE: conditional specific logic as we store secrets in Vault in ENT and use GHA secrets in CE. 
- name: Login to Docker Hub - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # pin@v2.1.0 with: username: ${{ endsWith(github.repository, '-enterprise') && steps.secrets.outputs.DOCKERHUB_USERNAME || secrets.DOCKERHUB_USERNAME }} password: ${{ endsWith(github.repository, '-enterprise') && steps.secrets.outputs.DOCKERHUB_TOKEN || secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 # v4.1.1 + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # pin@v4.0.0 with: context: ./bin file: ./build-support/docker/Consul-Dev.dockerfile diff --git a/.github/workflows/build-distros.yml b/.github/workflows/build-distros.yml index 4930d77e36a50..ada2869cd3dfc 100644 --- a/.github/workflows/build-distros.yml +++ b/.github/workflows/build-distros.yml @@ -17,10 +17,6 @@ env: GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} GOPRIVATE: github.com/hashicorp # Required for enterprise deps -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} - cancel-in-progress: true - jobs: setup: name: Setup @@ -31,7 +27,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -60,7 +56,7 @@ jobs: XC_OS: "freebsd linux windows" runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git @@ -85,7 +81,7 @@ jobs: XC_OS: "darwin freebsd linux solaris windows" runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git @@ -111,7 +107,7 @@ jobs: CGO_ENABLED: 1 GOOS: linux steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git @@ -138,7 +134,7 @@ jobs: - check-go-mod runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- name: Setup Git diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0047d809b218d..6b0a1f85ec385 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -70,7 +70,7 @@ jobs: filepath: ${{ steps.generate-metadata-file.outputs.filepath }} steps: - name: 'Checkout directory' - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Generate metadata file id: generate-metadata-file uses: hashicorp/actions-generate-metadata@v1 @@ -104,12 +104,12 @@ jobs: name: Go ${{ needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup with node and yarn - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 with: - node-version: '18' + node-version: '14' cache: 'yarn' cache-dependency-path: 'ui/yarn.lock' @@ -195,12 +195,12 @@ jobs: name: Go ${{ needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup with node and yarn - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 with: - node-version: '18' + node-version: '14' cache: 'yarn' cache-dependency-path: 'ui/yarn.lock' @@ -247,12 +247,12 @@ jobs: name: Go ${{ needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup with node and yarn - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 with: - node-version: '18' + node-version: '14' cache: 'yarn' cache-dependency-path: 'ui/yarn.lock' @@ -302,7 +302,7 @@ jobs: version: ${{needs.set-product-version.outputs.product-version}} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix # This naming convention will be used ONLY for per-commit dev images @@ -329,8 +329,28 @@ jobs: docker.io/hashicorppreview/${{ env.repo }}:${{ env.minor_dev_tag }}-${{ github.sha }} smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} + build-docker-ubi-redhat: + name: Docker Build UBI Image for RedHat Registry + needs: + - set-product-version + - build + runs-on: ubuntu-latest + env: + repo: ${{github.event.repository.name}} + version: ${{needs.set-product-version.outputs.product-version}} + + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - uses: hashicorp/actions-docker-build@v1 + with: + version: ${{env.version}} + target: ubi + arch: amd64 + redhat_tag: quay.io/redhat-isv-containers/60f9fdbec3a80eac643abedf:${{env.version}}-ubi + smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} + build-docker-ubi-dockerhub: - name: Docker Build 
UBI Images + name: Docker Build UBI Image for DockerHub needs: - set-product-version - build @@ -340,7 +360,7 @@ jobs: version: ${{needs.set-product-version.outputs.product-version}} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix # This naming convention will be used ONLY for per-commit dev images @@ -365,7 +385,6 @@ jobs: docker.io/hashicorppreview/${{ env.repo }}:${{ env.minor_dev_tag }}-ubi docker.io/hashicorppreview/${{ env.repo }}:${{ env.minor_dev_tag }}-ubi-${{ github.sha }} smoke_test: .github/scripts/verify_docker.sh v${{ env.version }} - redhat_tag: quay.io/redhat-isv-containers/60f9fdbec3a80eac643abedf:${{env.version}}-ubi verify-linux: needs: @@ -386,7 +405,7 @@ jobs: name: Verify ${{ matrix.arch }} linux binary steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 if: ${{ endsWith(github.repository, '-enterprise') || matrix.arch != 's390x' }} - name: Download ${{ matrix.arch }} zip @@ -396,7 +415,7 @@ jobs: name: ${{ env.zip_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 if: ${{ matrix.arch == 'arm' || matrix.arch == 'arm64' }} with: # this should be a comma-separated string as opposed to an array @@ -419,7 +438,7 @@ jobs: name: Verify amd64 darwin binary steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Download amd64 darwin zip uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 @@ -450,7 +469,7 @@ jobs: name: Verify ${{ matrix.arch }} debian package steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Set package version run: | @@ -466,7 +485,7 @@ jobs: name: ${{ env.pkg_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 with: platforms: all @@ -491,7 +510,7 @@ jobs: name: Verify ${{ matrix.arch }} rpm steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Set package version run: | @@ -507,7 +526,7 @@ jobs: name: ${{ env.pkg_name }} - name: Set up QEMU - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 + uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 with: platforms: all diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index 62b906eda3e66..d00717e2f0492 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -22,7 +22,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/.github/workflows/copywrite.hcl 
b/.github/workflows/copywrite.hcl new file mode 100644 index 0000000000000..99d2e817072c6 --- /dev/null +++ b/.github/workflows/copywrite.hcl @@ -0,0 +1,24 @@ +name: Check Copywrite Headers + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + push: + branches: + - main + - release/** + +jobs: + copywrite: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + - uses: hashicorp/setup-copywrite@867a1a2a064a0626db322392806428f7dc59cb3e # v1.1.2 + name: Setup Copywrite + with: + version: v0.16.4 + archive-checksum: c299f830e6eef7e126a3c6ef99ac6f43a3c132d830c769e0d36fa347fa1af254 + - name: Check Header Compliance + run: make copywrite-headers +permissions: + contents: read diff --git a/.github/workflows/embedded-asset-checker.yml b/.github/workflows/embedded-asset-checker.yml index 38879945e209c..4bb07771bd68f 100644 --- a/.github/workflows/embedded-asset-checker.yml +++ b/.github/workflows/embedded-asset-checker.yml @@ -20,7 +20,7 @@ jobs: if: "! ( contains(github.event.pull_request.labels.*.name, 'pr/update-ui-assets') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )" runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches diff --git a/.github/workflows/frontend.yml b/.github/workflows/frontend.yml index 3fbab0d9cf472..ed1e0c2088f84 100644 --- a/.github/workflows/frontend.yml +++ b/.github/workflows/frontend.yml @@ -21,7 +21,7 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - id: setup-outputs name: Setup outputs run: ./.github/scripts/get_runner_classes.sh @@ -33,14 +33,14 @@ jobs: run: working-directory: ui steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: - node-version: '18' + node-version: '16' - name: Install Yarn - run: corepack enable + run: npm install -g yarn # Install dependencies. - name: install yarn packages @@ -53,14 +53,14 @@ jobs: needs: setup runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: - node-version: '18' + node-version: '16' - name: Install Yarn - run: corepack enable + run: npm install -g yarn # Install dependencies. 
- name: install yarn packages @@ -72,68 +72,27 @@ jobs: ember-build-test: needs: setup - if: ${{ !endsWith(github.repository, '-enterprise') }} - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} strategy: matrix: partition: [1, 2, 3, 4] env: EMBER_TEST_REPORT: test-results/report-ce.xml # outputs test report for CI test summary EMBER_TEST_PARALLEL: true # enables test parallelization with ember-exam - CONSUL_NSPACES_ENABLED: 0 # NOTE: this should be 1 in ENT. + CONSUL_NSPACES_ENABLED: ${{ endsWith(github.repository, '-enterprise') && 1 || 0 }} # NOTE: this should be 1 in ENT. JOBS: 2 # limit parallelism for broccoli-babel-transpiler steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 - with: - node-version: '18' - - - name: Install Yarn - run: corepack enable - - - name: Install Chrome - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 - - - name: Install dependencies - working-directory: ui - run: make deps - - - name: Build CI - working-directory: ui/packages/consul-ui - run: make build-ci - - - name: Ember exam - working-directory: ui/packages/consul-ui - run: node_modules/.bin/ember exam --split=4 --partition=${{ matrix.partition }} --path dist --silent -r xunit - - - name: Test Coverage CI - working-directory: ui/packages/consul-ui - run: make test-coverage-ci - - ember-build-test-ent: - needs: setup - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} - strategy: - matrix: - partition: [1, 2, 3, 4] - env: - EMBER_TEST_REPORT: test-results/report-ce.xml # outputs test report for CI test summary - EMBER_TEST_PARALLEL: true # enables test parallelization with ember-exam - CONSUL_NSPACES_ENABLED: 1 # NOTE: this should be 1 in ENT. - JOBS: 2 # limit parallelism for broccoli-babel-transpiler - steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # pin@v3.6.0 with: - node-version: '18' + node-version: '16' - name: Install Yarn - run: corepack enable + run: npm install -g yarn - name: Install Chrome - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + uses: browser-actions/setup-chrome@29abc1a83d1d71557708563b4bc962d0f983a376 # pin@v1.2.1 - name: Install dependencies working-directory: ui @@ -150,6 +109,7 @@ jobs: - name: Test Coverage CI working-directory: ui/packages/consul-ui run: make test-coverage-ci + # This is job is required for branch protection as a required gihub check # because GitHub actions show up as checks at the job level and not the # workflow level. 
This is currently a feature request: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index c298623144eb6..77f431b6080a6 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -4,12 +4,12 @@ on: pull_request: branches-ignore: - stable-website - - 'docs/**' - - 'ui/**' - - 'mktg-**' # Digital Team Terraform-generated branches' prefix - - 'backport/docs/**' - - 'backport/ui/**' - - 'backport/mktg-**' + - "docs/**" + - "ui/**" + - "mktg-**" # Digital Team Terraform-generated branches' prefix + - "backport/docs/**" + - "backport/ui/**" + - "backport/mktg-**" push: branches: # Push events on the main branch @@ -31,14 +31,14 @@ concurrency: jobs: conditional-skip: - runs-on: ubuntu-latest + runs-on: ubuntu-latest name: Get files changed and conditionally skip CI outputs: skip-ci: ${{ steps.read-files.outputs.skip-ci }} steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: - fetch-depth: 0 + fetch-depth: 0 - name: Get changed files id: read-files run: ./.github/scripts/filter_changed_files_go_test.sh @@ -54,18 +54,18 @@ jobs: compute-large: ${{ steps.setup-outputs.outputs.compute-large }} compute-xl: ${{ steps.setup-outputs.outputs.compute-xl }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - id: setup-outputs - name: Setup outputs - run: ./.github/scripts/get_runner_classes.sh + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - id: setup-outputs + name: Setup outputs + run: ./.github/scripts/get_runner_classes.sh get-go-version: uses: ./.github/workflows/reusable-get-go-version.yml check-go-mod: needs: - - setup - - get-go-version + - setup + - get-go-version uses: ./.github/workflows/reusable-check-go-mod.yml with: runs-on: ${{ needs.setup.outputs.compute-small }} @@ -76,102 +76,117 @@ jobs: check-generated-protobuf: needs: - - setup - - get-go-version + - setup + - get-go-version runs-on: ${{ fromJSON(needs.setup.outputs.compute-medium) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: make proto-tools - name: Install protobuf - - run: make proto-format - name: "Protobuf Format" - - run: make --always-make proto - - run: | - if ! git diff --exit-code; then - echo "Generated code was not updated correctly" - exit 1 - fi - - run: make proto-lint - name: "Protobuf Lint" - check-codegen: - needs: - - setup - - get-go-version - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} - steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: make --always-make codegen - - run: | - if ! git diff --exit-code; then - echo "Generated code was not updated correctly" - exit 1 - fi + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. + - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version: ${{ needs.get-go-version.outputs.go-version }} + - run: make proto-tools + name: Install protobuf + - run: make proto-format + name: "Protobuf Format" + - run: make --always-make proto + - run: | + if ! git diff --exit-code; then + echo "Generated code was not updated correctly" + exit 1 + fi + - run: make proto-lint + name: "Protobuf Lint" + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh + check-generated-deep-copy: + needs: + - setup + - get-go-version + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. + - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version: ${{ needs.get-go-version.outputs.go-version }} + - run: make --always-make deep-copy + - run: | + if ! git diff --exit-code; then + echo "Generated code was not updated correctly" + exit 1 + fi + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh lint-enums: needs: - - setup - - get-go-version + - setup + - get-go-version runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./... + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
+ - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version: ${{ needs.get-go-version.outputs.go-version }} + - run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./... + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh lint-container-test-deps: needs: - - setup - - get-go-version + - setup + - get-go-version runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: make lint-container-test-deps + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. + - name: Setup Git + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version: ${{ needs.get-go-version.outputs.go-version }} + - run: make lint-container-test-deps + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh lint-consul-retry: needs: - - setup - - get-go-version + - setup + - get-go-version runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: make lint-consul-retry + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
+ - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version: ${{ needs.get-go-version.outputs.go-version }} + - run: go install github.com/hashicorp/lint-consul-retry@v1.3.0 && lint-consul-retry + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh lint: needs: - - setup - - get-go-version + - setup + - get-go-version uses: ./.github/workflows/reusable-lint.yml with: runs-on: ${{ needs.setup.outputs.compute-large }} @@ -182,8 +197,8 @@ jobs: lint-32bit: needs: - - setup - - get-go-version + - setup + - get-go-version uses: ./.github/workflows/reusable-lint.yml with: go-arch: "386" @@ -196,8 +211,8 @@ jobs: # create a development build dev-build: needs: - - setup - - get-go-version + - setup + - get-go-version uses: ./.github/workflows/reusable-dev-build.yml with: runs-on: ${{ needs.setup.outputs.compute-large }} @@ -250,7 +265,7 @@ jobs: # uploaded-binary-name: 'consul-bin-arm64' # runner-count: 12 # runs-on: "['self-hosted', 'ondemand', 'os=macos-arm', 'arm64']" - # go-test-flags: "${{ (github.ref_name != 'main' && !startsWith(github.ref_name, 'release/')) && '-short' || '' }}" + # go-test-flags: 'if ! [[ "$GITHUB_REF_NAME" =~ ^main$|^release/ ]]; then export GO_TEST_FLAGS="-short"; fi' # go-version: ${{ needs.get-go-version.outputs.go-version }} # repository-name: ${{ github.repository }} # secrets: @@ -260,13 +275,13 @@ jobs: go-test-ce: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit-split.yml with: directory: . - runner-count: 6 + runner-count: 12 runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "" @@ -282,16 +297,16 @@ jobs: go-test-enterprise: if: ${{ endsWith(github.repository, '-enterprise') }} needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit-split.yml with: directory: . - runner-count: 6 + runner-count: 12 runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -303,17 +318,17 @@ jobs: go-test-race: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: . - go-test-flags: "-race -gcflags=all=-d=checkptr=0" + go-test-flags: 'GO_TEST_FLAGS="-race -gcflags=all=-d=checkptr=0"' package-names-command: "go list ./... 
| grep -E -v '^github.com/hashicorp/consul/agent(/consul|/local|/routine-leak-checker)?$' | grep -E -v '^github.com/hashicorp/consul(/command|/connect|/snapshot)'" runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -325,17 +340,17 @@ jobs: go-test-32bit: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: . go-arch: "386" - go-test-flags: "-short" + go-test-flags: 'export GO_TEST_FLAGS="-short"' runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -355,10 +370,10 @@ jobs: # with: # uploaded-binary-name: 'consul-bin-s390x' # directory: . - # go-test-flags: -short" + # go-test-flags: 'export GO_TEST_FLAGS="-short"' # runs-on: ${{ needs.setup.outputs.compute-large }} # repository-name: ${{ github.repository }} - # go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + # go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" # go-version: ${{ needs.get-go-version.outputs.go-version }} # permissions: # id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -370,15 +385,15 @@ jobs: go-test-envoyextensions: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: envoyextensions runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -390,15 +405,15 @@ jobs: go-test-troubleshoot: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: troubleshoot runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. 
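The go-test-race job above pairs a package-names-command that excludes several agent, command, connect, and snapshot packages with a GO_TEST_FLAGS value that enables the race detector. A rough local equivalent of that combination, shown only for illustration (the actual wiring lives in reusable-unit.yml, which this patch does not touch):

  #!/usr/bin/env bash
  # Mirror the go-test-race package filter and flags from the workflow above.
  set -euo pipefail
  GO_TEST_FLAGS='-race -gcflags=all=-d=checkptr=0'
  PACKAGES="$(go list ./... \
    | grep -E -v '^github.com/hashicorp/consul/agent(/consul|/local|/routine-leak-checker)?$' \
    | grep -E -v '^github.com/hashicorp/consul(/command|/connect|/snapshot)')"
  # Word-splitting of the flag string and package list is intentional here.
  go test $GO_TEST_FLAGS $PACKAGES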
@@ -411,15 +426,15 @@ jobs: go-test-api-backwards-compatibility: name: go-test-api-${{ needs.get-go-version.outputs.go-version-previous }} needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: api runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version-previous }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -431,15 +446,15 @@ jobs: go-test-api: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: api runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -452,15 +467,15 @@ jobs: go-test-sdk-backwards-compatibility: name: go-test-sdk-${{ needs.get-go-version.outputs.go-version-previous }} needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: sdk runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version-previous }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -472,15 +487,15 @@ jobs: go-test-sdk: needs: - - setup - - get-go-version - - dev-build + - setup + - get-go-version + - dev-build uses: ./.github/workflows/reusable-unit.yml with: directory: sdk runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} - go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: ${{ needs.get-go-version.outputs.go-version }} permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. 
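A recurring change in these jobs is the go-tags input, which uses the Actions &&/|| idiom as a ternary: enterprise repositories get the tag set consulent consulprem consuldev, while community edition gets an empty string. Assuming the reusable workflow ultimately hands that list to go test's -tags flag (a reasonable reading, but reusable-unit.yml itself is outside this diff), the two cases for the api module amount to:

  # Enterprise checkout (repository name 'consul-enterprise'):
  ( cd api && go test -tags consulent,consulprem,consuldev ./... )
  # Community edition checkout (empty tag list):
  ( cd api && go test ./... )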
@@ -511,27 +526,27 @@ jobs: go-tests-success: needs: - - conditional-skip - - setup - - check-codegen - - check-generated-protobuf - - check-go-mod - - lint-consul-retry - - lint-container-test-deps - - lint-enums - - lint - - lint-32bit - # - go-test-arm64 - - go-test-enterprise - - go-test-ce - - go-test-race - - go-test-envoyextensions - - go-test-troubleshoot - - go-test-api-backwards-compatibility - - go-test-api - - go-test-sdk-backwards-compatibility - - go-test-sdk - - go-test-32bit + - conditional-skip + - setup + - check-generated-deep-copy + - check-generated-protobuf + - check-go-mod + - lint-consul-retry + - lint-container-test-deps + - lint-enums + - lint + - lint-32bit + # - go-test-arm64 + - go-test-enterprise + - go-test-ce + - go-test-race + - go-test-envoyextensions + - go-test-troubleshoot + - go-test-api-backwards-compatibility + - go-test-api + - go-test-sdk-backwards-compatibility + - go-test-sdk + - go-test-32bit # - go-test-s390x runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: always() && needs.conditional-skip.outputs.skip-ci != 'true' @@ -539,50 +554,7 @@ jobs: - name: evaluate upstream job results run: | # exit 1 if failure or cancelled result for any upstream job - # this ensures that we fail the PR check regardless of cancellation, rather than skip-passing it - # see https://docs.github.com/en/actions/using-jobs/using-conditions-to-control-job-execution#overview if printf '${{ toJSON(needs) }}' | grep -E -i '\"result\": \"(failure|cancelled)\"'; then printf "Tests failed or workflow cancelled:\n\n${{ toJSON(needs) }}" exit 1 fi - - name: Set failure Slack commit message summary - # failure() ensures this runs even if the test eval step exits 1 - if: failure() - env: - # Capturing in an env var makes this safe against GHA shell injection via commit message. - # See https://securitylab.github.com/research/github-actions-untrusted-input/ - COMMIT_MESSAGE_FULL: ${{ github.event.head_commit.message }} - run: | - # if failure (not cancelled), notify Slack - if printf '${{ toJSON(needs) }}' | grep -E -i '\"result\": \"(failure)\"'; then - printf "Tests failed, notifying Slack" - echo "FAILED_TESTS=true" >> $GITHUB_ENV - - # 'echo ... | head -n 1' does not work reliably, so use bash-ism to get first line. - COMMIT_MESSAGE_SUMMARY=${COMMIT_MESSAGE_FULL%%$'\n'*} - - # Send multi-line env var to GITHUB_ENV. - # github.event.head_commit.message and github.ref_name both rely on this event occurring on a push / merge - echo "SLACK_MESSAGE_RAW<> $GITHUB_ENV - echo "❌ ${{ github.workflow }} workflow failed: - - - Run: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - Branch: ${{ github.ref_name }} - - Message: ${COMMIT_MESSAGE_SUMMARY} - - Author: ${{ github.event.sender.login }}" >> $GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - fi - - name: Notify Slack - # failure() ensures this runs even if the test eval step exits 1 - # FAILED_TESTS must also be checked to avoid running this step on cancellation due to the summary check above - if: ${{ failure() && env.FAILED_TESTS == 'true' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} - id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 - with: - # Escape entire message string to ensure valid JSON. If invalid, the notification will fail silently in CI. 
- payload: | - { - "message": ${{ toJSON(env.SLACK_MESSAGE_RAW) }} - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_PROTECTED_BRANCH_TEST_SLACK_WEBHOOK }} \ No newline at end of file diff --git a/.github/workflows/issue-comment-created.yml b/.github/workflows/issue-comment-created.yml index 42483d92b1645..01e7e13f8bc44 100644 --- a/.github/workflows/issue-comment-created.yml +++ b/.github/workflows/issue-comment-created.yml @@ -11,8 +11,8 @@ jobs: triage: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 + - uses: actions/checkout@v2 + - uses: actions-ecosystem/action-remove-labels@v1 with: labels: | waiting-reply diff --git a/.github/workflows/jira-issues.yaml b/.github/workflows/jira-issues.yaml index c136dfd69a78c..d595e5f5af8c6 100644 --- a/.github/workflows/jira-issues.yaml +++ b/.github/workflows/jira-issues.yaml @@ -16,7 +16,7 @@ jobs: name: Jira Community Issue sync steps: - name: Login - uses: atlassian/gajira-login@ca13f8850ea309cf44a6e4e0c49d9aa48ac3ca4c # v3 + uses: atlassian/gajira-login@v3.0.0 env: JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} @@ -40,7 +40,7 @@ jobs: - name: Create ticket if an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: NET issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -60,7 +60,7 @@ jobs: # Education Jira - name: Create ticket in Education board an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' && contains(github.event.issue.labels.*.name, 'type/docs') - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: CE issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -77,28 +77,28 @@ jobs: - name: Search if: github.event.action != 'opened' id: search - uses: tomhjp/gh-action-jira-search@04700b457f317c3e341ce90da5a3ff4ce058f2fa # v0.2.2 + uses: tomhjp/gh-action-jira-search@v0.2.2 with: # cf[10089] is Issue Link (use JIRA API to retrieve) jql: 'issuetype = "${{ steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"' - name: Sync comment if: github.event.action == 'created' && steps.search.outputs.issue - uses: tomhjp/gh-action-jira-comment@6eb6b9ead70221916b6badd118c24535ed220bd9 # v0.2.0 + uses: tomhjp/gh-action-jira-comment@v0.2.0 with: issue: ${{ steps.search.outputs.issue }} comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}" - name: Close ticket if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue - uses: atlassian/gajira-transition@4749176faf14633954d72af7a44d7f2af01cc92b # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "Closed" - name: Reopen ticket if: github.event.action == 'reopened' && steps.search.outputs.issue - uses: atlassian/gajira-transition@4749176faf14633954d72af7a44d7f2af01cc92b # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ 
steps.search.outputs.issue }} transition: "To Do" diff --git a/.github/workflows/jira-pr.yaml b/.github/workflows/jira-pr.yaml index a40bb0ae0f829..cadbfef1b2b8c 100644 --- a/.github/workflows/jira-pr.yaml +++ b/.github/workflows/jira-pr.yaml @@ -14,7 +14,7 @@ jobs: name: Jira sync steps: - name: Login - uses: atlassian/gajira-login@ca13f8850ea309cf44a6e4e0c49d9aa48ac3ca4c # v3 + uses: atlassian/gajira-login@v3.0.0 env: JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} @@ -59,7 +59,7 @@ jobs: - name: Create ticket if an issue is filed, or if PR not by a team member is opened if: ( github.event.action == 'opened' && steps.is-team-member.outputs.MESSAGE == 'false' ) - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: NET issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -79,7 +79,7 @@ jobs: # Education Jira - name: Create ticket in Education board an issue is filed, or if PR not by a team member is opened if: github.event.action == 'opened' && steps.is-team-member.outputs.MESSAGE == 'false' && contains(github.event.issue.labels.*.name, 'type/docs') - uses: tomhjp/gh-action-jira-create@3ed1789cad3521292e591a7cfa703215ec1348bf # v0.2.1 + uses: tomhjp/gh-action-jira-create@v0.2.1 with: project: CE issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}" @@ -91,28 +91,28 @@ jobs: - name: Search if: github.event.action != 'opened' id: search - uses: tomhjp/gh-action-jira-search@04700b457f317c3e341ce90da5a3ff4ce058f2fa # v0.2.2 + uses: tomhjp/gh-action-jira-search@v0.2.2 with: # cf[10089] is Issue Link (use JIRA API to retrieve) jql: 'issuetype = "${{ steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"' - name: Sync comment if: github.event.action == 'created' && steps.search.outputs.issue - uses: tomhjp/gh-action-jira-comment@6eb6b9ead70221916b6badd118c24535ed220bd9 # v0.2.0 + uses: tomhjp/gh-action-jira-comment@v0.2.0 with: issue: ${{ steps.search.outputs.issue }} comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}" - name: Close ticket if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue - uses: atlassian/gajira-transition@4749176faf14633954d72af7a44d7f2af01cc92b # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "Closed" - name: Reopen ticket if: github.event.action == 'reopened' && steps.search.outputs.issue - uses: atlassian/gajira-transition@4749176faf14633954d72af7a44d7f2af01cc92b # v3 + uses: atlassian/gajira-transition@v3.0.1 with: issue: ${{ steps.search.outputs.issue }} transition: "To Do" diff --git a/.github/workflows/nightly-test-1.16.x.yaml b/.github/workflows/nightly-test-1.12.x.yaml similarity index 75% rename from .github/workflows/nightly-test-1.16.x.yaml rename to .github/workflows/nightly-test-1.12.x.yaml index b441eca5d0f59..c09cc4864b89d 100644 --- a/.github/workflows/nightly-test-1.16.x.yaml +++ b/.github/workflows/nightly-test-1.12.x.yaml @@ -1,28 +1,27 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.16.x +name: Nightly Test 1.12.x on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.16.x" - BRANCH_NAME: "release-1.16.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + BRANCH: "release/1.12.x" + BRANCH_NAME: "release-1.12.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload CE Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download CE Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 
# v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.17.x.yaml b/.github/workflows/nightly-test-1.13.x.yaml similarity index 73% rename from .github/workflows/nightly-test-1.17.x.yaml rename to .github/workflows/nightly-test-1.13.x.yaml index 9a063001e402c..6139eb4bc1e1a 100644 --- a/.github/workflows/nightly-test-1.17.x.yaml +++ b/.github/workflows/nightly-test-1.13.x.yaml @@ -1,30 +1,29 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.17.x +name: Nightly Test 1.13.x on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.17.x" - BRANCH_NAME: "release-1.17.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + BRANCH: "release/1.13.x" + BRANCH_NAME: "release-1.13.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -49,14 +48,14 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload CE Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,14 +87,14 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary 
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download CE Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,14 +120,14 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,14 +159,14 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,14 +190,14 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.14.x.yaml b/.github/workflows/nightly-test-1.14.x.yaml index 11fb011d13571..9b310f59065dc 100644 --- a/.github/workflows/nightly-test-1.14.x.yaml +++ b/.github/workflows/nightly-test-1.14.x.yaml 
@@ -1,28 +1,27 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.14.x +name: Nightly Test 1.14.x on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.14.x" - BRANCH_NAME: "release-1.14.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "release-1.14.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload CE Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download CE Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: 
actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-1.15.x.yaml b/.github/workflows/nightly-test-1.15.x.yaml index a98eb73070b36..9048abb4a04e4 100644 --- a/.github/workflows/nightly-test-1.15.x.yaml +++ b/.github/workflows/nightly-test-1.15.x.yaml @@ -1,28 +1,27 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.15.x +name: Nightly Test 1.15.x on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.15.x" - BRANCH_NAME: "release-1.15.x" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "release-1.15.x" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -49,12 +48,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload CE Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,12 +87,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not 
necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download CE Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,12 +120,12 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,12 +159,12 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,12 +190,12 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: node-version: 14 cache: 'yarn' @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/nightly-test-integrations-1.15.x.yml b/.github/workflows/nightly-test-integrations-1.15.x.yml index 3889c7246bdaa..10bb8cb0ef46a 100644 --- a/.github/workflows/nightly-test-integrations-1.15.x.yml +++ b/.github/workflows/nightly-test-integrations-1.15.x.yml @@ -14,7 +14,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ 
endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} @@ -73,10 +73,10 @@ jobs: env: # this is further going to multiplied in envoy-integration tests by the # other dimensions in the matrix. Currently TOTAL_RUNNERS would be - # 14 based on these values: - # envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11", "1.26.7", "1.27.3", "1.28.1"] + # multiplied by 8 based on these values: + # envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11"] # xds-target: ["server", "client"] - TOTAL_RUNNERS: 7 + TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | NUM_RUNNERS=$TOTAL_RUNNERS @@ -109,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11", "1.26.7", "1.27.3", "1.28.1"] + envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -242,8 +242,9 @@ jobs: docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ --raw-command \ - --format=github-actions \ - --rerun-fails \ + --format=short-verbose \ + --debug \ + --rerun-fails=2 \ --packages="./..." \ -- \ go test \ diff --git a/.github/workflows/nightly-test-integrations-1.16.x.yml b/.github/workflows/nightly-test-integrations-1.16.x.yml index d6cf2aa875225..93f4db0258216 100644 --- a/.github/workflows/nightly-test-integrations-1.16.x.yml +++ b/.github/workflows/nightly-test-integrations-1.16.x.yml @@ -14,7 +14,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} @@ -74,9 +74,9 @@ jobs: # this is further going to multiplied in envoy-integration tests by the # other dimensions in the matrix. Currently TOTAL_RUNNERS would be # multiplied by 8 based on these values: - # envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.7"] + # envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.6"] # xds-target: ["server", "client"] - TOTAL_RUNNERS: 8 + TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | NUM_RUNNERS=$TOTAL_RUNNERS @@ -109,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.7"] + envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.6"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -263,8 +263,9 @@ jobs: docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ --raw-command \ - --format=github-actions \ - --rerun-fails \ + --format=short-verbose \ + --debug \ + --rerun-fails=2 \ --packages="./..." 
\ -- \ go test \ diff --git a/.github/workflows/nightly-test-integrations-1.17.x.yml b/.github/workflows/nightly-test-integrations-1.17.x.yml index 6af47c8adf056..a0c63b7108c2b 100644 --- a/.github/workflows/nightly-test-integrations-1.17.x.yml +++ b/.github/workflows/nightly-test-integrations-1.17.x.yml @@ -74,7 +74,7 @@ jobs: # this is further going to multiplied in envoy-integration tests by the # other dimensions in the matrix. Currently TOTAL_RUNNERS would be # multiplied by 8 based on these values: - # envoy-version: ["1.24.12", "1.25.11", "1.26.7", "1.27.3"] + # envoy-version: ["1.24.12", "1.25.11", "1.26.6", "1.27.2"] # xds-target: ["server", "client"] TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' @@ -109,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ["1.24.12", "1.25.11", "1.26.7", "1.27.3"] + envoy-version: ["1.24.12", "1.25.11", "1.26.6", "1.27.2"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: diff --git a/.github/workflows/nightly-test-integrations.yml b/.github/workflows/nightly-test-integrations.yml index 6dd5d37ed0f55..d7812bee35ef2 100644 --- a/.github/workflows/nightly-test-integrations.yml +++ b/.github/workflows/nightly-test-integrations.yml @@ -14,7 +14,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} @@ -71,9 +71,9 @@ jobs: # this is further going to multiplied in envoy-integration tests by the # other dimensions in the matrix. Currently TOTAL_RUNNERS would be # multiplied by 8 based on these values: - # envoy-version: ["1.25.11", "1.26.7", "1.27.3", "1.28.1"] + # envoy-version: ["1.24.10", "1.25.9", "1.26.4", "1.27.0"] # xds-target: ["server", "client"] - TOTAL_RUNNERS: 8 + TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | NUM_RUNNERS=$TOTAL_RUNNERS @@ -94,7 +94,7 @@ jobs: } >> "$GITHUB_OUTPUT" envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version @@ -106,7 +106,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ["1.25.11", "1.26.7", "1.27.3", "1.28.1"] + envoy-version: ["1.23.12", "1.24.10", "1.25.9", "1.26.4"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -190,7 +190,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml upgrade-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version @@ -201,13 +201,10 @@ jobs: strategy: fail-fast: false matrix: - consul-version: [ "1.16", "1.17"] + consul-version: ["1.14", "1.15", "1.16"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version upported by all - # consul versions in the matrix.consul-version, since we are testing upgrade from - # an older consul version, e.g., 1.26.6 is supported by both 1.16 and 1.17. 
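The `TOTAL_RUNNERS`/`JQ_SLICER` pair in the hunks above splits the Envoy test cases into roughly equal groups, one group per runner, by leaning on jq's internal `_nwise` chunking helper. The generation script itself is not reproduced in this patch, so the step below is only a minimal sketch of how that filter can be driven, assuming the case names arrive one per line; the `ls` source and the exact output handling are illustrative:

    - name: Generate Envoy Job Matrices
      id: set-matrix
      env:
        TOTAL_RUNNERS: 4
        JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]'
      run: |
        NUM_RUNNERS=$TOTAL_RUNNERS
        # Read case names line by line, chunk them into NUM_RUNNERS groups,
        # and publish the result as a job output for fromJSON() consumers.
        matrix=$(ls test/integration/connect/envoy | jq --raw-input --null-input --compact-output \
          --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER")
        echo "envoy-matrix=${matrix}" >> "$GITHUB_OUTPUT"

Each element of the resulting array is itself a list of case names, which is why the consuming job wraps the `test-cases` matrix dimension in `fromJSON(...)`.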
- ENVOY_VERSION: "1.26.6" + ENVOY_VERSION: "1.24.6" steps: - name: Checkout code uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 @@ -234,13 +231,11 @@ jobs: run: docker build -t ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local -f ./build-support/docker/Consul-Dev.dockerfile . - name: Build consul-envoy:latest-version image id: buildConsulEnvoyLatestImage - run: | - if ${{ endsWith(github.repository, '-enterprise') }} == 'true' - then - docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }}-ent --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - else - docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }} --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - fi + continue-on-error: true + run: docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }} --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets + - name: Retry Build consul-envoy:latest-version image + if: steps.buildConsulEnvoyLatestImage.outcome == 'failure' + run: docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }} --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - name: Build consul-envoy:target-version image id: buildConsulEnvoyTargetImage continue-on-error: true @@ -263,8 +258,9 @@ jobs: docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ --raw-command \ - --format=github-actions \ - --rerun-fails \ + --format=short-verbose \ + --debug \ + --rerun-fails=2 \ --packages="./..." \ -- \ go test \ @@ -318,94 +314,6 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - upgrade-integration-test-deployer: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} - needs: - - setup - - get-go-version - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - matrix: - consul-version: [ "1.16", "1.17"] - env: - CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - steps: - - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - with: - ref: ${{ inputs.branch }} - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
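The replacement just above swaps an inline shell if/else for a simpler workflow-level retry idiom: run the first attempt with `continue-on-error: true` and an `id`, then repeat the same command in a follow-up step gated on that step's `outcome` (which, unlike `conclusion`, still reports the failure even though the job kept going). A stripped-down sketch of the pattern, with the step id and the build arguments shortened for readability:

    - name: Build consul-envoy image
      id: build-consul-envoy
      continue-on-error: true    # a failure here does not fail the job yet
      run: docker build -t consul-envoy:latest-version -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets
    - name: Retry build consul-envoy image
      if: steps.build-consul-envoy.outcome == 'failure'
      run: docker build -t consul-envoy:latest-version -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets

If the retry fails as well, the job fails normally; the pattern buys exactly one extra attempt for a flaky image pull or build.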
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go env - - name: Build image - run: make test-deployer-setup - - name: Upgrade Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - #export NOLOGBUFFER=1 - cd ./test-integ/upgrade - docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=github-actions \ - --packages="./..." \ - -- \ - go test \ - -tags "${{ env.GOTAGS }}" \ - -timeout=20m \ - -parallel=2 \ - -json \ - ./... \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version "${{ env.CONSUL_LATEST_VERSION }}" - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. - TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. - - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v2.5.0 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml test-integrations-success: needs: @@ -414,7 +322,6 @@ jobs: - generate-envoy-job-matrices - envoy-integration-test - upgrade-integration-test - - upgrade-integration-test-deployer runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: ${{ always() }} steps: @@ -425,14 +332,3 @@ jobs: printf "Tests failed or workflow cancelled:\n\n${{ toJSON(needs) }}" exit 1 fi - - name: Notify Slack - if: ${{ failure() }} - id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 - with: - payload: | - { - "message": "One or more nightly integration tests have failed. 
${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_NIGHTLY_INTEG_TEST_SLACK_WEBHOOK }} diff --git a/.github/workflows/nightly-test-main.yaml b/.github/workflows/nightly-test-main.yaml index a089121cc8c66..16160175b6815 100644 --- a/.github/workflows/nightly-test-main.yaml +++ b/.github/workflows/nightly-test-main.yaml @@ -1,30 +1,29 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test Main +name: Nightly Test Main on: schedule: - cron: '0 4 * * *' workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "main" - BRANCH_NAME: "main" # Used for naming artifacts - GOPRIVATE: github.com/hashicorp # Required for enterprise deps + BRANCH_NAME: "main" # Used for naming artifacts jobs: frontend-test-workspace-node: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -49,14 +48,14 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -71,7 +70,7 @@ jobs: run: make build-ci - name: Upload CE Frontend - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -88,14 +87,14 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -105,7 +104,7 @@ jobs: run: make deps - name: Download CE Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ce-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -121,14 +120,14 @@ jobs: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -143,7 +142,7 @@ jobs: run: make build-ci - name: Upload ENT Frontend - uses: 
actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + uses: actions/upload-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -160,14 +159,14 @@ jobs: EMBER_TEST_REPORT: test-results/report-ce.xml #outputs test report for CI test summary EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -177,7 +176,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -191,14 +190,14 @@ jobs: runs-on: ubuntu-latest needs: [frontend-build-ent] steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ env.BRANCH }} # Not necessary to use yarn, but enables caching - - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + - uses: actions/setup-node@v3 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -208,7 +207,7 @@ jobs: run: make deps - name: Download ENT Frontend - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@v3 with: name: frontend-ent-${{ env.BRANCH_NAME }} path: ./ui/packages/consul-ui/dist @@ -224,7 +223,7 @@ jobs: steps: - name: Slack Notification id: slack - uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + uses: slackapi/slack-github-action@v1.19 with: payload: | { diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml index 0d6b71c9f0618..3e39524242327 100644 --- a/.github/workflows/pr-labeler.yml +++ b/.github/workflows/pr-labeler.yml @@ -1,6 +1,6 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + name: "Pull Request Labeler" on: pull_request_target: @@ -10,8 +10,8 @@ jobs: triage: runs-on: ubuntu-latest steps: - - uses: actions/labeler@0967ca812e7fdc8f5f71402a1b486d5bd061fe20 # v4.2.0 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - configuration-path: .github/pr-labeler.yml - sync-labels: false + - uses: actions/labeler@0967ca812e7fdc8f5f71402a1b486d5bd061fe20 # v4.2.0 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: .github/pr-labeler.yml + sync-labels: false \ No newline at end of file diff --git a/.github/workflows/pr-metrics-test-checker.yml b/.github/workflows/pr-metrics-test-checker.yml index d0bdac04f7e3f..a73f4fbb3ff5a 100644 --- a/.github/workflows/pr-metrics-test-checker.yml +++ b/.github/workflows/pr-metrics-test-checker.yml @@ -14,7 +14,7 @@ jobs: if: "! 
( contains(github.event.pull_request.labels.*.name, 'pr/no-metrics-test') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )" runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 name: "checkout repo" with: ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/reusable-check-go-mod.yml b/.github/workflows/reusable-check-go-mod.yml index a646aa0712061..8a41778a89e86 100644 --- a/.github/workflows/reusable-check-go-mod.yml +++ b/.github/workflows/reusable-check-go-mod.yml @@ -21,12 +21,12 @@ jobs: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version: ${{ inputs.go-version }} # Run on all go.mod (include submodules). @@ -37,3 +37,6 @@ jobs: git status -s exit 1 fi + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/reusable-dev-build-windows.yml b/.github/workflows/reusable-dev-build-windows.yml index 1417d1dbae366..8083832f4b535 100644 --- a/.github/workflows/reusable-dev-build-windows.yml +++ b/.github/workflows/reusable-dev-build-windows.yml @@ -45,3 +45,6 @@ jobs: with: name: ${{inputs.uploaded-binary-name}} path: consul.exe + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/reusable-dev-build.yml b/.github/workflows/reusable-dev-build.yml index 511ea7925cb19..3f7df9c2d5bb3 100644 --- a/.github/workflows/reusable-dev-build.yml +++ b/.github/workflows/reusable-dev-build.yml @@ -14,10 +14,6 @@ on: repository-name: required: true type: string - branch-name: - required: false - type: string - default: "" go-arch: required: false type: string @@ -32,15 +28,7 @@ jobs: build: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - # NOTE: This is used for nightly job of building release branch. - - name: Checkout branch ${{ inputs.branch-name }} - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - with: - ref: ${{ inputs.branch-name }} - if: inputs.branch-name != '' - - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - if: inputs.branch-name == '' + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
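Several of the reusable workflows in this patch gain the same trailing step: invoke `.github/scripts/notify_slack.sh`, but only when something earlier in the job failed. The script body is not part of this hunk, so the sketch below simply assumes it reads a webhook URL from its environment; the step shape itself matches the additions above:

    - name: Notify Slack
      if: ${{ failure() }}    # true only when a previous step in this job has failed
      env:
        # Assumed interface: the script posts to whatever webhook URL it finds here.
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      run: .github/scripts/notify_slack.sh

Because the step is appended after the regular steps, it adds no cost to green runs and fires once per failing job rather than once per failing step.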
- name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} @@ -57,3 +45,6 @@ jobs: with: name: ${{inputs.uploaded-binary-name}} path: ./bin/consul + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/reusable-lint.yml b/.github/workflows/reusable-lint.yml index b834d564918d9..82c68bc21998d 100644 --- a/.github/workflows/reusable-lint.yml +++ b/.github/workflows/reusable-lint.yml @@ -21,9 +21,8 @@ on: elevated-github-token: required: true env: - GOTAGS: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consuldev' || '' }}" + GOTAGS: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" GOARCH: ${{inputs.go-arch}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: lint: @@ -37,26 +36,27 @@ jobs: - "envoyextensions" - "troubleshoot" - "test/integration/consul-container" - - "test-integ" - - "testing/deployer" fail-fast: true name: lint ${{ matrix.directory }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version: ${{ inputs.go-version }} - run: go env - name: Set golangci-lint version run: echo "GOLANGCI_LINT_VERSION=$(make --no-print-directory print-GOLANGCI_LINT_VERSION)" >> $GITHUB_ENV - name: lint-${{ matrix.directory }} - uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # pin@v3.4.0 with: working-directory: ${{ matrix.directory }} version: ${{ env.GOLANGCI_LINT_VERSION }} args: --build-tags="${{ env.GOTAGS }}" -v skip-cache: true + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/reusable-unit-split.yml b/.github/workflows/reusable-unit-split.yml index ab16db368b737..210039e908852 100644 --- a/.github/workflows/reusable-unit-split.yml +++ b/.github/workflows/reusable-unit-split.yml @@ -49,12 +49,11 @@ on: required: true env: TEST_RESULTS: /tmp/test-results - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" GOARCH: ${{inputs.go-arch}} TOTAL_RUNNERS: ${{inputs.runner-count}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: @@ -63,8 +62,8 @@ jobs: outputs: package-matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version: ${{ inputs.go-version }} - id: set-matrix @@ -86,12 +85,12 @@ jobs: ulimit -Sa echo "Hard limits" ulimit -Ha - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # 
v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} run: git config --global url."https://${{ secrets.elevated-github-token }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # pin@v3.5.0 with: go-version: ${{ inputs.go-version }} - run: mkdir -p ${{env.TEST_RESULTS}} @@ -99,7 +98,7 @@ jobs: working-directory: ${{inputs.directory}} run: go mod download - name: Download consul - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{inputs.uploaded-binary-name}} path: ${{inputs.directory}} @@ -117,19 +116,22 @@ jobs: PACKAGE_NAMES="${{ join(matrix.package, ' ') }}" # PACKAGE_NAMES="${{ matrix.package }}" + ${{inputs.go-test-flags}} + # some tests expect this umask, and arm images have a different default umask 0022 go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --format=github-actions \ - --format-hide-empty-pkg \ + --format=short-verbose \ --jsonfile /tmp/jsonfile/go-test.log \ - --rerun-fails \ + --debug \ + --rerun-fails=3 \ + --rerun-fails-max-failures=40 \ --rerun-fails-report=/tmp/gotestsum-rerun-fails \ --packages="$PACKAGE_NAMES" \ --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- \ - -tags="${{env.GOTAGS}}" \ - ${{inputs.go-test-flags}} \ + -tags="${{env.GOTAGS}}" -p 2 \ + ${GO_TEST_FLAGS-} \ -cover -coverprofile=coverage.txt \ -timeout=30m @@ -164,12 +166,12 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" ${{env.TEST_RESULTS}}/gotestsum-report.xml - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 if: ${{ !cancelled() }} with: name: test-results path: ${{env.TEST_RESULTS}} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 if: ${{ !cancelled() }} with: name: jsonfile @@ -178,3 +180,6 @@ jobs: if: ${{ !cancelled() }} run: | .github/scripts/rerun_fails_report.sh /tmp/gotestsum-rerun-fails + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index 072999d79e279..431577135a62d 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -45,18 +45,17 @@ on: required: true env: TEST_RESULTS: /tmp/test-results - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" GOARCH: ${{inputs.go-arch}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: go-test: runs-on: ${{ fromJSON(inputs.runs-on) }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # pin@v3.3.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
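The reusable-unit-split workflow above uses the standard two-job sharding pattern: a small setup job computes a JSON matrix of package groups and publishes it through `$GITHUB_OUTPUT`, and the test job fans out over it with `fromJSON`. A minimal, self-contained sketch of that wiring; the job names and the one-line splitter are hypothetical stand-ins for the real script, and each matrix entry here is a single package rather than a group, to keep the example short:

    jobs:
      set-up-package-matrix:
        runs-on: ubuntu-latest
        outputs:
          package-matrix: ${{ steps.set-matrix.outputs.matrix }}
        steps:
          - uses: actions/checkout@v2
          - id: set-matrix
            # Hypothetical splitter: emit the module's package list as a JSON array.
            run: echo "matrix=$(go list -json ./... | jq -s -c 'map(.ImportPath)')" >> "$GITHUB_OUTPUT"
      go-test:
        needs: [set-up-package-matrix]
        runs-on: ubuntu-latest
        strategy:
          matrix:
            package: ${{ fromJSON(needs.set-up-package-matrix.outputs.package-matrix) }}
        steps:
          - uses: actions/checkout@v2
          - run: go test ${{ matrix.package }}

In the real workflow each matrix entry is a whole group of packages, which is why the test step reassembles them with `join(matrix.package, ' ')` before handing them to gotestsum.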
- name: Setup Git if: ${{ endsWith(inputs.repository-name, '-enterprise') }} @@ -69,7 +68,7 @@ jobs: working-directory: ${{inputs.directory}} run: go mod download - name: Download consul - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # pin@v3.0.2 with: name: ${{inputs.uploaded-binary-name}} path: ${{inputs.directory}} @@ -88,15 +87,19 @@ jobs: # some tests expect this umask, and arm images have a different default umask 0022 + ${{inputs.go-test-flags}} + go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --format=github-actions \ + --format=short-verbose \ --jsonfile /tmp/jsonfile/go-test.log \ - --rerun-fails \ + --debug \ + --rerun-fails=3 \ + --rerun-fails-max-failures=40 \ --rerun-fails-report=/tmp/gotestsum-rerun-fails \ --packages="$PACKAGE_NAMES" \ --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- \ -tags="${{env.GOTAGS}}" \ - ${{inputs.go-test-flags}} \ + ${GO_TEST_FLAGS-} \ -cover -coverprofile=coverage.txt \ -timeout=30m @@ -131,12 +134,12 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" ${{env.TEST_RESULTS}}/gotestsum-report.xml - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 if: ${{ !cancelled() }} with: name: test-results path: ${{env.TEST_RESULTS}} - - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # pin@v3.1.2 if: ${{ !cancelled() }} with: name: jsonfile @@ -145,3 +148,6 @@ jobs: if: ${{ !cancelled() }} run: | .github/scripts/rerun_fails_report.sh /tmp/gotestsum-rerun-fails + - name: Notify Slack + if: ${{ failure() }} + run: .github/scripts/notify_slack.sh diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ff07a961a4e48..f3da6d422b6b1 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ jobs: permissions: pull-requests: write steps: - - uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v8.0.0 + - uses: actions/stale@v4 with: days-before-stale: -1 days-before-close: -1 diff --git a/.github/workflows/test-integrations-windows.yml b/.github/workflows/test-integrations-windows.yml index ef10e6e578332..15c9e44b33708 100644 --- a/.github/workflows/test-integrations-windows.yml +++ b/.github/workflows/test-integrations-windows.yml @@ -11,7 +11,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.9.0" CONSUL_BINARY_UPLOAD_NAME: consul.exe # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }} @@ -62,7 +62,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: [ "1.28.1" ] + envoy-version: [ "1.26.7" ] xds-target: [ "server", "client" ] env: ENVOY_VERSION: ${{ matrix.envoy-version }} diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 58e477fd15681..9cf687ea18b0a 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -19,7 +19,7 @@ env: TEST_RESULTS_ARTIFACT_NAME: test-results CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} 
GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" + GOTESTSUM_VERSION: "1.10.1" CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} @@ -56,7 +56,7 @@ jobs: compute-xl: ${{ steps.runners.outputs.compute-xl }} enterprise: ${{ steps.runners.outputs.enterprise }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 - id: runners run: .github/scripts/get_runner_classes.sh @@ -86,16 +86,16 @@ jobs: contents: read strategy: matrix: - nomad-version: ['v1.7.3', 'v1.6.6', 'v1.5.13'] + nomad-version: ['v1.3.3', 'v1.2.10', 'v1.1.16'] steps: - name: Checkout Nomad - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 with: repository: hashicorp/nomad ref: ${{ matrix.nomad-version }} - name: Install Go - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: # Do not explicitly set Go version here, as it should depend on what Nomad declares. go-version-file: 'go.mod' @@ -117,7 +117,7 @@ jobs: run: | go install gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} && \ gotestsum \ - --format=github-actions \ + --format=short-verbose \ --rerun-fails \ --rerun-fails-report=/tmp/gotestsum-rerun-fails \ --packages="./command/agent/consul" \ @@ -167,18 +167,18 @@ jobs: contents: read strategy: matrix: - vault-version: ["1.15.4", "1.14.8", "1.13.12"] + vault-version: ["1.13.1", "1.12.5", "1.11.9", "1.10.11"] env: VAULT_BINARY_VERSION: ${{ matrix.vault-version }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - name: Setup Git if: ${{ endsWith(github.repository, '-enterprise') }} run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: # We use the current Consul Go version here since Vault is installed as a binary # and tests are run from the Consul repo. 
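Nearly every test step in this patch wraps `go test` in gotestsum, which is what produces the JUnit files that later get uploaded and what drives the automatic reruns of flaky failures. A condensed sketch of the invocation style used throughout; the version, paths, and retry budget mirror values visible in this patch, while the step name is illustrative:

    - name: Run tests with gotestsum
      env:
        GOTESTSUM_VERSION: "1.10.1"
        TEST_RESULTS: /tmp/test-results
      run: |
        mkdir -p "$TEST_RESULTS"
        go run gotest.tools/gotestsum@v${GOTESTSUM_VERSION} \
          --format=short-verbose \
          --rerun-fails=3 \
          --rerun-fails-report=/tmp/gotestsum-rerun-fails \
          --packages="./..." \
          --junitfile "$TEST_RESULTS/gotestsum-report.xml" \
          -- \
          -tags "$GOTAGS" -cover -coverprofile=coverage.txt -timeout=30m

Everything after the bare `--` is passed straight to `go test`; the flags before it belong to gotestsum itself, and `--rerun-fails` requires the package list to be supplied via `--packages` rather than as a positional argument.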
@@ -194,17 +194,17 @@ jobs: run: | mkdir -p "${{ env.TEST_RESULTS_DIR }}" go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --format=github-actions \ + --format=short-verbose \ --junitfile "${{ env.TEST_RESULTS_DIR }}/gotestsum-report.xml" \ -- -tags "${{ env.GOTAGS }}" -cover -coverprofile=coverage.txt ./agent/connect/ca # Run leader tests that require Vault go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --format=github-actions \ + --format=short-verbose \ --junitfile "${{ env.TEST_RESULTS_DIR }}/gotestsum-report-leader.xml" \ -- -tags "${{ env.GOTAGS }}" -cover -coverprofile=coverage-leader.txt -run Vault ./agent/consul # Run agent tests that require Vault go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --format=github-actions \ + --format=short-verbose \ --junitfile "${{ env.TEST_RESULTS_DIR }}/gotestsum-report-agent.xml" \ -- -tags "${{ env.GOTAGS }}" -cover -coverprofile=coverage-agent.txt -run Vault ./agent @@ -263,16 +263,16 @@ jobs: outputs: envoy-matrix: ${{ steps.set-matrix.outputs.envoy-matrix }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 - name: Generate Envoy Job Matrix id: set-matrix env: # this is further going to multiplied in envoy-integration tests by the # other dimensions in the matrix. Currently TOTAL_RUNNERS would be # multiplied by 2 based on these values: - # envoy-version: ["1.28.1"] + # envoy-version: ["1.26.7"] # xds-target: ["server", "client"] - TOTAL_RUNNERS: 2 + TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | NUM_RUNNERS=$TOTAL_RUNNERS @@ -305,7 +305,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ["1.28.1"] + envoy-version: ["1.26.7"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -313,8 +313,8 @@ jobs: XDS_TARGET: ${{ matrix.xds-target }} AWS_LAMBDA_REGION: us-west-2 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version: ${{ needs.get-go-version.outputs.go-version }} @@ -327,7 +327,7 @@ jobs: run: chmod +x ./bin/consul - name: Set up Docker Buildx - uses: docker/setup-buildx-action@2a1a44ac4aa01993040736bd95bb470da1a38365 # v2.9.0 + uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1 - name: Docker build run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin @@ -386,7 +386,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml compatibility-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} # NOTE: do not change without tuning the -p and -parallel flags in go test. + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version @@ -395,15 +395,10 @@ jobs: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read env: - ENVOY_VERSION: "1.28.1" - CONSUL_DATAPLANE_IMAGE: "docker.io/hashicorppreview/consul-dataplane:1.3-dev-ubi" + ENVOY_VERSION: "1.25.4" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. 
It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0 + - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version: ${{ needs.get-go-version.outputs.go-version }} - run: go env @@ -428,8 +423,6 @@ jobs: - name: Retry Build consul-envoy:target-version image if: steps.buildConsulEnvoyImage.outcome == 'failure' run: docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=${{ env.CONSUL_LATEST_IMAGE_NAME }}:local --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - - name: Build consul-dataplane:local image - run: docker build -t consul-dataplane:local --build-arg CONSUL_IMAGE=${{ env.CONSUL_LATEST_IMAGE_NAME }}:local --build-arg CONSUL_DATAPLANE_IMAGE=${{ env.CONSUL_DATAPLANE_IMAGE }} -f ./test/integration/consul-container/assets/Dockerfile-consul-dataplane ./test/integration/consul-container/assets - name: Configure GH workaround for ipv6 loopback if: ${{ !endsWith(github.repository, '-enterprise') }} run: | @@ -443,8 +436,9 @@ jobs: docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ --raw-command \ - --format=github-actions \ - --rerun-fails \ + --format=standard-verbose \ + --debug \ + --rerun-fails=3 \ -- \ go test \ -p=6 \ @@ -452,7 +446,7 @@ jobs: -tags "${{ env.GOTAGS }}" \ -timeout=30m \ -json \ - `go list -tags "${{ env.GOTAGS }}" ./... | grep -v upgrade | grep -v peering_commontopo` \ + `go list ./... | grep -v upgrade` \ --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ --target-version local \ --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ @@ -498,90 +492,6 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - integration-test-with-deployer: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} - needs: - - setup - - get-go-version - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - env: - DEPLOYER_CONSUL_DATAPLANE_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.3-dev" - steps: - - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
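The JUnit upload steps that recur through these jobs follow one shape: fetch the standalone datadog-ci binary, then push the report with the repository name as the service, skipping cancelled runs and forks. A trimmed sketch of that pair of steps, with the conditions and commands as they appear in this patch:

    - name: prepare datadog-ci
      if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }}
      run: |
        curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" \
          --output "/usr/local/bin/datadog-ci"
        chmod +x /usr/local/bin/datadog-ci
    - name: upload test results
      # do not run on forks, since fork PRs cannot read the API key secret
      if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }}
      env:
        DATADOG_API_KEY: ${{ secrets.DATADOG_API_KEY }}
        DD_ENV: ci
      run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" "$TEST_RESULTS_DIR/results.xml"

The enterprise repositories instead resolve `DATADOG_API_KEY` from Vault, which is why the original steps select between `env.DATADOG_API_KEY` and `secrets.DATADOG_API_KEY` with an `endsWith(github.repository, '-enterprise')` expression.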
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go env - - name: Build image - run: make test-deployer-setup - - name: Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - #export NOLOGBUFFER=1 - cd ./test-integ - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=github-actions \ - -- \ - go test \ - -tags "${{ env.GOTAGS }}" \ - -timeout=20m \ - -parallel=2 \ - -failfast \ - -json \ - `go list -tags "${{ env.GOTAGS }}" ./... | grep -v peering_commontopo | grep -v upgrade ` \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version latest - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. - TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. - - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v2.5.0 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - test-integrations-success: needs: - conditional-skip @@ -592,7 +502,6 @@ jobs: - generate-envoy-job-matrices - envoy-integration-test - compatibility-integration-test - - integration-test-with-deployer runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: always() && needs.conditional-skip.outputs.skip-ci != 'true' steps: diff --git a/.github/workflows/verify-envoy-version.yml b/.github/workflows/verify-envoy-version.yml index dafa9db6f22a6..7c46453aec128 100644 --- a/.github/workflows/verify-envoy-version.yml +++ b/.github/workflows/verify-envoy-version.yml @@ -13,7 +13,7 @@ on: branches: - main - release/** - + env: SKIP_VERIFY_ENVOY_VERSION: ${{ vars.SKIP_VERIFY_ENVOY_VERSION }} @@ -21,11 +21,11 @@ jobs: verify-envoy-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} 
fetch-depth: 0 # by default the checkout action doesn't checkout all branches - name: Run Envoy Version Verification for main and release branches run: ./.github/scripts/verify_envoy_version.sh env: - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 9649c4a8cb988..a48d19b74cc22 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ .vagrant/ /pkg bin/ -workdir/ changelog.tmp exit-code Thumbs.db @@ -19,7 +18,6 @@ Thumbs.db __debug_bin coverage.out *.tmp -.zed # MacOS .DS_Store @@ -70,4 +68,3 @@ override.tf.json terraform.rc /go.work /go.work.sum -.docker diff --git a/.golangci.yml b/.golangci.yml index b87fa40a2e613..54fd8468abaad 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 linters: disable-all: true @@ -28,11 +28,6 @@ issues: - linters: [staticcheck] text: 'SA1019: "io/ioutil" has been deprecated since Go 1.16' - # Allow usage of deprecated values. - - linters: [ staticcheck ] - text: 'SA1019:' - path: "(agent/grpc-external)" - # An argument that always receives the same value is often not a problem. - linters: [unparam] text: "always receives" diff --git a/.grpcmocks.yaml b/.grpcmocks.yaml deleted file mode 100644 index 97949b9ff2cf4..0000000000000 --- a/.grpcmocks.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -with-expecter: true -recursive: true -include-regex: ".*" -exclude-regex: "(serverStream|Is(Inmem|Cloning).*Client)" -# We don't want the mocks within proto-public to prevent forcing a dependency -# of the testify library on the modules usage. The mocks are only for -# internal testing purposes. Other consumers can generated the mocks into -# their own code base. -dir: "grpcmocks/{{.InterfaceDirRelative}}" -outpkg: "mock{{.PackageName}}" -mockname: "{{.InterfaceName}}" -packages: - github.com/hashicorp/consul/proto-public/pbacl: - github.com/hashicorp/consul/proto-public/pbconnectca: - github.com/hashicorp/consul/proto-public/pbdataplane: - github.com/hashicorp/consul/proto-public/pbserverdiscovery: - github.com/hashicorp/consul/proto-public/pbresource: - github.com/hashicorp/consul/proto-public/pbdns: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index ec68f1133db8e..0000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -# -# Opt-in to running pre-commit hooks by running `make tools`. -# -# Guidelines for adding new pre-commit hooks -# ==================================================================== -# A hook SHOULD be blazingly fast (<2s) to impose minimal latency on -# developer workflows (e.g. golangci-lint takes > 8s) -# -# A hook SHOULD attempt to fix errors, not just identify them. -# -# A hook SHOULD address common errors in files that tend to change -# frequently. While surfacing esoteric issues is nice, hooks that have a -# wider impact are preferred. -# -repos: -- repo: https://github.com/tekwizely/pre-commit-golang - rev: v1.0.0-rc.1 - hooks: - # Formats go imports into deterministic sections. - # `pre-commit run gci` to run in isolation. 
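For readers who don't work in `.golangci.yml` often: the staticcheck and unparam entries touched above are exclusion rules, which in golangci-lint normally sit under `issues.exclude-rules` and match on a linter name, an issue-text regex, and optionally a path. A small sketch of that shape, using the two rules visible in this hunk; the `exclude-rules` nesting is an assumption, since the surrounding lines of the file are not shown here:

    issues:
      exclude-rules:
        # Allow the deprecated io/ioutil package for now.
        - linters: [staticcheck]
          text: 'SA1019: "io/ioutil" has been deprecated since Go 1.16'
        # An argument that always receives the same value is often not a problem.
        - linters: [unparam]
          text: "always receives"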
- - id: my-cmd - name: Format go imports - alias: gci - # skip all generated go files - exclude: | - (?x)( - ^proto-public/| - ^proto/| - ^agent/xds/z_xds_packages\.go$| - ^testing/deployer/topology/default_versions\.go$| - \.deepcopy\.go$| - \.gen\.go$| - \.pb\.go$| - \.pb\.binary\.go$| - generated_funcs\.go$| - _generated_test\.go$| - mock_.+\.go$ - ) - args: - - "gci" - - "write" - - "--section" - - "standard" - - "--section" - - "default" - - "--section" - - "prefix(github.com/hashicorp/)" - - "--section" - - "prefix(github.com/hashicorp/consul/)" diff --git a/.release/ci.hcl b/.release/ci.hcl index d11983b460574..dfe69d2fc1ebd 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 schema = "1" diff --git a/.release/docker/docker-entrypoint-ubi.sh b/.release/docker/docker-entrypoint-ubi.sh index 96e70df925672..a932ad7286e28 100755 --- a/.release/docker/docker-entrypoint-ubi.sh +++ b/.release/docker/docker-entrypoint-ubi.sh @@ -1,6 +1,6 @@ #!/usr/bin/dumb-init /bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -e diff --git a/.release/docker/docker-entrypoint-windows.sh b/.release/docker/docker-entrypoint-windows.sh deleted file mode 100644 index f6aac9afaeca7..0000000000000 --- a/.release/docker/docker-entrypoint-windows.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/dumb-init /bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -# Note above that we run dumb-init as PID 1 in order to reap zombie processes -# as well as forward signals to all processes in its session. Normally, sh -# wouldn't do either of these functions so we'd leak zombies as well as do -# unclean termination of all our sub-processes. -# As of docker 1.13, using docker run --init achieves the same outcome. - -# You can set CONSUL_BIND_INTERFACE to the name of the interface you'd like to -# bind to and this will look up the IP and pass the proper -bind= option along -# to Consul. -CONSUL_BIND= -if [ -n "$CONSUL_BIND_INTERFACE" ]; then - CONSUL_BIND_ADDRESS=$(ip -o -4 addr list $CONSUL_BIND_INTERFACE | head -n1 | awk '{print $4}' | cut -d/ -f1) - if [ -z "$CONSUL_BIND_ADDRESS" ]; then - echo "Could not find IP for interface '$CONSUL_BIND_INTERFACE', exiting" - exit 1 - fi - - CONSUL_BIND="-bind=$CONSUL_BIND_ADDRESS" - echo "==> Found address '$CONSUL_BIND_ADDRESS' for interface '$CONSUL_BIND_INTERFACE', setting bind option..." -fi - -# You can set CONSUL_CLIENT_INTERFACE to the name of the interface you'd like to -# bind client intefaces (HTTP, DNS, and RPC) to and this will look up the IP and -# pass the proper -client= option along to Consul. -CONSUL_CLIENT= -if [ -n "$CONSUL_CLIENT_INTERFACE" ]; then - CONSUL_CLIENT_ADDRESS=$(ip -o -4 addr list $CONSUL_CLIENT_INTERFACE | head -n1 | awk '{print $4}' | cut -d/ -f1) - if [ -z "$CONSUL_CLIENT_ADDRESS" ]; then - echo "Could not find IP for interface '$CONSUL_CLIENT_INTERFACE', exiting" - exit 1 - fi - - CONSUL_CLIENT="-client=$CONSUL_CLIENT_ADDRESS" - echo "==> Found address '$CONSUL_CLIENT_ADDRESS' for interface '$CONSUL_CLIENT_INTERFACE', setting client option..." -fi - -# CONSUL_DATA_DIR is exposed as a volume for possible persistent storage. The -# CONSUL_CONFIG_DIR isn't exposed as a volume but you can compose additional -# config files in there if you use this image as a base, or use CONSUL_LOCAL_CONFIG -# below. 
-CONSUL_DATA_DIR=C:\\consul\\data -CONSUL_CONFIG_DIR=C:\\consul\\config - -# You can also set the CONSUL_LOCAL_CONFIG environemnt variable to pass some -# Consul configuration JSON without having to bind any volumes. -if [ -n "$CONSUL_LOCAL_CONFIG" ]; then - echo "$CONSUL_LOCAL_CONFIG" > "$CONSUL_CONFIG_DIR/local.json" -fi - -# If the user is trying to run Consul directly with some arguments, then -# pass them to Consul. -if [ "${1:0:1}" = '-' ]; then - set -- consul "$@" -fi - -# Look for Consul subcommands. -if [ "$1" = 'agent' ]; then - shift - set -- consul agent \ - -data-dir="$CONSUL_DATA_DIR" \ - -config-dir="$CONSUL_CONFIG_DIR" \ - $CONSUL_BIND \ - $CONSUL_CLIENT \ - "$@" -elif [ "$1" = 'version' ]; then - # This needs a special case because there's no help output. - set -- consul "$@" -elif consul --help "$1" 2>&1 | grep -q "consul $1"; then - # We can't use the return code to check for the existence of a subcommand, so - # we have to use grep to look for a pattern in the help output. - set -- consul "$@" -fi - -# NOTE: Unlike in the regular Consul Docker image, we don't have code here -# for changing data-dir directory ownership or using su-exec because OpenShift -# won't run this container as root and so we can't change data dir ownership, -# and there's no need to use su-exec. - -exec "$@" \ No newline at end of file diff --git a/.release/docker/docker-entrypoint.sh b/.release/docker/docker-entrypoint.sh index a544809643e00..c169576b6cf85 100755 --- a/.release/docker/docker-entrypoint.sh +++ b/.release/docker/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/usr/bin/dumb-init /bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 set -e diff --git a/.release/linux/package/etc/consul.d/consul.hcl b/.release/linux/package/etc/consul.d/consul.hcl index b25f186858564..b54644b2f9ccd 100644 --- a/.release/linux/package/etc/consul.d/consul.hcl +++ b/.release/linux/package/etc/consul.d/consul.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 # Full configuration options can be found at https://www.consul.io/docs/agent/config diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl index 963192fc4b80e..8de2623fdee4e 100644 --- a/.release/release-metadata.hcl +++ b/.release/release-metadata.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/consul" url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/consul" diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index 6a784734e9bc3..5f70ffb443313 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 # These scan results are run as part of CRT workflows. 
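A rough local sketch of what the removed `integration-test-with-deployer` job above executed, for anyone reproducing those deployer tests outside CI. This is an approximation only: `GOTESTSUM_VERSION`, `GOTAGS`, and the image name are assumptions here, whereas the workflow resolved them from env vars, secrets, and the preceding build job.

```shell
# Assumed values; the deleted workflow step supplied these via env/secrets.
export GOTESTSUM_VERSION=1.11.0
export GOTAGS=""
export CONSUL_LATEST_IMAGE_NAME=hashicorp/consul

cd ./test-integ
go run gotest.tools/gotestsum@v"${GOTESTSUM_VERSION}" \
  --raw-command \
  --format=github-actions \
  -- \
  go test \
    -tags "${GOTAGS}" \
    -timeout=20m \
    -parallel=2 \
    -failfast \
    -json \
    $(go list -tags "${GOTAGS}" ./... | grep -v peering_commontopo | grep -v upgrade) \
    --target-image "${CONSUL_LATEST_IMAGE_NAME}" \
    --target-version local \
    --latest-image "${CONSUL_LATEST_IMAGE_NAME}" \
    --latest-version latest
```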
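The deleted `.pre-commit-config.yaml` above ran gci with an explicit section order to keep Go imports deterministic. A minimal sketch of invoking gci directly with the same sections, assuming the upstream `github.com/daixiang0/gci` tool and an arbitrary example file:

```shell
# Format imports into the same sections the removed hook configured.
# ./agent/agent.go is just an illustrative target, not taken from the hook.
go run github.com/daixiang0/gci@latest write \
  --section standard \
  --section default \
  --section 'prefix(github.com/hashicorp/)' \
  --section 'prefix(github.com/hashicorp/consul/)' \
  ./agent/agent.go
```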
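The entrypoint scripts touched above (including the removed Windows variant) resolve `CONSUL_BIND_INTERFACE` and `CONSUL_CLIENT_INTERFACE` to IP addresses and pass them to the agent as `-bind`/`-client`. A hedged usage sketch of that mechanism with the published Linux image; the interface name, tag, and local config are assumptions:

```shell
# Let the entrypoint derive -bind/-client from eth0 and inject extra JSON config.
docker run --name consul-dev \
  -e CONSUL_BIND_INTERFACE=eth0 \
  -e CONSUL_CLIENT_INTERFACE=eth0 \
  -e CONSUL_LOCAL_CONFIG='{"datacenter":"dc1"}' \
  hashicorp/consul:latest agent -dev
```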
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a192c7e6480b..876a60f05ee86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,30 +1,3 @@ -## 1.17.3 (February 13, 2024) - -SECURITY: - -* mesh: Update Envoy versions to 1.27.3 and 1.26.7 to address [CVE-2024-23324](https://github.com/envoyproxy/envoy/security/advisories/GHSA-gq3v-vvhj-96j6), [CVE-2024-23325](https://github.com/envoyproxy/envoy/security/advisories/GHSA-5m7c-mrwr-pm26), [CVE-2024-23322](https://github.com/envoyproxy/envoy/security/advisories/GHSA-6p83-mfmh-qv38), [CVE-2024-23323](https://github.com/envoyproxy/envoy/security/advisories/GHSA-x278-4w4x-r7ch), [CVE-2024-23327](https://github.com/envoyproxy/envoy/security/advisories/GHSA-4h5x-x9vh-m29j), and [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-20587](https://github.com/hashicorp/consul/issues/20587)] - -FEATURES: - -* cli: Adds new command `exported-services` to list all services exported and their consumers. Refer to the [CLI docs](https://developer.hashicorp.com/consul/commands/exported-services) for more information. [[GH-20331](https://github.com/hashicorp/consul/issues/20331)] - -IMPROVEMENTS: - -* ProxyCfg: avoid setting a watch on `Internal.ServiceDump` when mesh gateway is not used. [[GH-20168](https://github.com/hashicorp/consul/issues/20168)] -* ProxyCfg: only return the nodes list when querying the `Internal.ServiceDump` watch from proxycfg [[GH-20168](https://github.com/hashicorp/consul/issues/20168)] -* Upgrade to use Go 1.21.7. [[GH-20545](https://github.com/hashicorp/consul/issues/20545)] -* api: add a new api(/v1/exported-services) to list all the exported service and their consumers. [[GH-20015](https://github.com/hashicorp/consul/issues/20015)] -* connect: Add `CaseInsensitive` flag to service-routers that allows paths and path prefixes to ignore URL upper and lower casing. [[GH-19647](https://github.com/hashicorp/consul/issues/19647)] - -BUG FIXES: - -* audit-logs: **(Enterprise Only)** Fixes non ASCII characters in audit logs because of gzip. [[GH-20345](https://github.com/hashicorp/consul/issues/20345)] -* connect: Fix issue where re-persisting existing proxy-defaults using `http` protocol fails with a protocol-mismatch error. [[GH-20481](https://github.com/hashicorp/consul/issues/20481)] -* connect: Fix regression with SAN matching on terminating gateways [GH-20360](https://github.com/hashicorp/consul/issues/20360) [[GH-20417](https://github.com/hashicorp/consul/issues/20417)] -* connect: Remove code coupling where the xDS capacity controller could negatively affect raft autopilot performance. [[GH-20511](https://github.com/hashicorp/consul/issues/20511)] -* logging: add /api prefix to v2 resource endpoint logs [[GH-20352](https://github.com/hashicorp/consul/issues/20352)] -* mesh: Fix bug where envoy extensions could not be configured with "permissive" mTLS mode. Note that envoy extensions currently do not apply to non-mTLS traffic in permissive mode. [[GH-20406](https://github.com/hashicorp/consul/issues/20406)] - ## 1.16.6 (February 13, 2024) SECURITY: @@ -45,52 +18,6 @@ BUG FIXES: * connect: Remove code coupling where the xDS capacity controller could negatively affect raft autopilot performance. [[GH-20511](https://github.com/hashicorp/consul/issues/20511)] * mesh: Fix bug where envoy extensions could not be configured with "permissive" mTLS mode. Note that envoy extensions currently do not apply to non-mTLS traffic in permissive mode. 
[[GH-20406](https://github.com/hashicorp/consul/issues/20406)] -## 1.15.10 (February 13, 2024) - -SECURITY: - -* mesh: Update Envoy versions to 1.28.1, 1.27.3, and 1.26.7 to address [CVE-2024-23324](https://github.com/envoyproxy/envoy/security/advisories/GHSA-gq3v-vvhj-96j6), [CVE-2024-23325](https://github.com/envoyproxy/envoy/security/advisories/GHSA-5m7c-mrwr-pm26), [CVE-2024-23322](https://github.com/envoyproxy/envoy/security/advisories/GHSA-6p83-mfmh-qv38), [CVE-2024-23323](https://github.com/envoyproxy/envoy/security/advisories/GHSA-x278-4w4x-r7ch), [CVE-2024-23327](https://github.com/envoyproxy/envoy/security/advisories/GHSA-4h5x-x9vh-m29j), and [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-20590](https://github.com/hashicorp/consul/issues/20590)] - -IMPROVEMENTS: - -* ProxyCfg: avoid setting a watch on `Internal.ServiceDump` when mesh gateway is not used. [[GH-20168](https://github.com/hashicorp/consul/issues/20168)] -* ProxyCfg: only return the nodes list when querying the `Internal.ServiceDump` watch from proxycfg [[GH-20168](https://github.com/hashicorp/consul/issues/20168)] -* Upgrade to use Go 1.21.7. [[GH-20545](https://github.com/hashicorp/consul/issues/20545)] -* mesh: update supported envoy version 1.28.0 in addition to 1.27.2, 1.26.6 to support LTS release [[GH-20323](https://github.com/hashicorp/consul/issues/20323)] - -BUG FIXES: - -* audit-logs: **(Enterprise Only)** Fixes non ASCII characters in audit logs because of gzip. [[GH-20345](https://github.com/hashicorp/consul/issues/20345)] -* connect: Fix issue where re-persisting existing proxy-defaults using `http` protocol fails with a protocol-mismatch error. [[GH-20481](https://github.com/hashicorp/consul/issues/20481)] -* connect: Remove code coupling where the xDS capacity controller could negatively affect raft autopilot performance. [[GH-20511](https://github.com/hashicorp/consul/issues/20511)] - -## 1.17.2 (January 23, 2024) - -KNOWN ISSUES: - -* connect: Consul versions 1.17.2 and 1.16.5 perform excessively strict TLS SAN verification on terminating gateways, which prevents connections outside of the mesh to upstream services. Terminating gateway users are advised to avoid deploying these Consul versions. A fix will be present in a future release of Consul 1.17.3 and 1.16.6. [[GH-20360](https://github.com/hashicorp/consul/issues/20360)] - -SECURITY: - -* Upgrade OpenShift container images to use `ubi9-minimal:9.3` as the base image. [[GH-20014](https://github.com/hashicorp/consul/issues/20014)] - -IMPROVEMENTS: - -* connect: Remove usage of deprecated Envoy field `match_subject_alt_names` in favor of `match_typed_subject_alt_names`. [[GH-19954](https://github.com/hashicorp/consul/issues/19954)] -* connect: replace usage of deprecated Envoy field `envoy.config.router.v3.WeightedCluster.total_weight`. 
[[GH-20011](https://github.com/hashicorp/consul/issues/20011)] -* xds: Replace usage of deprecated Envoy field `envoy.config.cluster.v3.Cluster.http_protocol_options` [[GH-20010](https://github.com/hashicorp/consul/issues/20010)] -* xds: remove usages of deprecated Envoy fields: `envoy.config.cluster.v3.Cluster.http2_protocol_options`, `envoy.config.bootstrap.v3.Admin.access_log_path` [[GH-19940](https://github.com/hashicorp/consul/issues/19940)] -* xds: replace usage of deprecated Envoy field `envoy.extensions.filters.http.lua.v3.Lua.inline_code` [[GH-20012](https://github.com/hashicorp/consul/issues/20012)] - -DEPRECATIONS: - -* cli: Deprecate the `-admin-access-log-path` flag from `consul connect envoy` command in favor of: `-admin-access-log-config`. [[GH-19943](https://github.com/hashicorp/consul/issues/19943)] - -BUG FIXES: - -* prepared-query: (Enterprise-only) Fix issue where sameness-group failover targets to peers would attempt to query data from the default partition, rather than the sameness-group's partition always. -* ui: update token list on Role details page to show only linked tokens [[GH-19912](https://github.com/hashicorp/consul/issues/19912)] - ## 1.16.5 (January 23, 2024) KNOWN ISSUES: @@ -116,68 +43,6 @@ BUG FIXES: * prepared-query: (Enterprise-only) Fix issue where sameness-group failover targets to peers would attempt to query data from the default partition, rather than the sameness-group's partition always. * ui: update token list on Role details page to show only linked tokens [[GH-19912](https://github.com/hashicorp/consul/issues/19912)] -## 1.15.9 (January 23, 2024) - -SECURITY: - -* Update RSA key generation to use a key size of at least 2048 bits. [[GH-20112](https://github.com/hashicorp/consul/issues/20112)] -* Upgrade OpenShift container images to use `ubi9-minimal:9.3` as the base image. [[GH-20014](https://github.com/hashicorp/consul/issues/20014)] - -IMPROVEMENTS: - -* Upgrade to use Go 1.21.6. [[GH-20062](https://github.com/hashicorp/consul/issues/20062)] - -BUG FIXES: - -* ui: update token list on Role details page to show only linked tokens [[GH-19912](https://github.com/hashicorp/consul/issues/19912)] - -## 1.17.1 (December 12, 2023) - -SECURITY: - -* Update `github.com/golang-jwt/jwt/v4` to v4.5.0 to address [PRISMA-2022-0270](https://github.com/golang-jwt/jwt/issues/258). [[GH-19705](https://github.com/hashicorp/consul/issues/19705)] -* Upgrade to use Go 1.20.12. This resolves CVEs - [CVE-2023-45283](https://nvd.nist.gov/vuln/detail/CVE-2023-45283): (`path/filepath`) recognize \??\ as a Root Local Device path prefix (Windows) - [CVE-2023-45284](https://nvd.nist.gov/vuln/detail/CVE-2023-45285): recognize device names with trailing spaces and superscripts (Windows) - [CVE-2023-39326](https://nvd.nist.gov/vuln/detail/CVE-2023-39326): (`net/http`) limit chunked data overhead - [CVE-2023-45285](https://nvd.nist.gov/vuln/detail/CVE-2023-45285): (`cmd/go`) go get may unexpectedly fallback to insecure git [[GH-19840](https://github.com/hashicorp/consul/issues/19840)] -* connect: update supported envoy versions to 1.24.12, 1.25.11, 1.26.6, 1.27.2 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-19274](https://github.com/hashicorp/consul/issues/19274)] - -FEATURES: - -* acl: Adds nomad client templated policy [[GH-19827](https://github.com/hashicorp/consul/issues/19827)] -* cli: Adds new subcommand `peering exported-services` to list services exported to a peer . 
Refer to the [CLI docs](https://developer.hashicorp.com/consul/commands/peering) for more information. [[GH-19821](https://github.com/hashicorp/consul/issues/19821)] - -IMPROVEMENTS: - -* mesh: parse the proxy-defaults protocol when write the config-entry to avoid parsing it when compiling the discovery chain. [[GH-19829](https://github.com/hashicorp/consul/issues/19829)] -* wan-federation: use a hash to diff config entries when replicating in the secondary DC to avoid unnecessary writes.. [[GH-19795](https://github.com/hashicorp/consul/issues/19795)] -* Replaces UI Side Nav with Helios Design System Side Nav. Adds dc/partition/namespace searching in Side Nav. [[GH-19342](https://github.com/hashicorp/consul/issues/19342)] -* acl: add api-gateway templated policy [[GH-19728](https://github.com/hashicorp/consul/issues/19728)] -* acl: add templated policy descriptions [[GH-19735](https://github.com/hashicorp/consul/issues/19735)] -* api: Add support for listing ACL tokens by service name when using templated policies. [[GH-19666](https://github.com/hashicorp/consul/issues/19666)] -* cli: stop simultaneous usage of -templated-policy and -templated-policy-file when creating a role or token. [[GH-19389](https://github.com/hashicorp/consul/issues/19389)] -* cloud: push additional server TLS metadata to HCP [[GH-19682](https://github.com/hashicorp/consul/issues/19682)] -* connect: Default `stats_flush_interval` to 60 seconds when using the Consul Telemetry Collector, unless custom stats sink are present or an explicit flush interval is configured. [[GH-19663](https://github.com/hashicorp/consul/issues/19663)] -* metrics: increment consul.client.rpc.failed if RPC fails because no servers are accessible [[GH-19721](https://github.com/hashicorp/consul/issues/19721)] -* metrics: modify consul.client.rpc metric to exclude internal retries for consistency with consul.client.rpc.exceeded and consul.client.rpc.failed [[GH-19721](https://github.com/hashicorp/consul/issues/19721)] -* ui: move nspace and partitions requests into their selector menus [[GH-19594](https://github.com/hashicorp/consul/issues/19594)] - -BUG FIXES: - -* CLI: fix a panic when deleting a non existing policy by name. [[GH-19679](https://github.com/hashicorp/consul/issues/19679)] -* Mesh Gateways: Fix a bug where replicated and peered mesh gateways with hostname-based WAN addresses fail to initialize. [[GH-19268](https://github.com/hashicorp/consul/issues/19268)] -* ca: Fix bug with Vault CA provider where renewing a retracted token would cause retries in a tight loop, degrading performance. [[GH-19285](https://github.com/hashicorp/consul/issues/19285)] -* ca: Fix bug with Vault CA provider where token renewal goroutines could leak if CA failed to initialize. [[GH-19285](https://github.com/hashicorp/consul/issues/19285)] -* connect: Solves an issue where two upstream services with the same name in different namespaces were not getting routed to correctly by API Gateways. 
[[GH-19860](https://github.com/hashicorp/consul/issues/19860)] -* federation: **(Enterprise Only)** Fixed an issue where namespace reconciliation could result into the secondary having dangling instances of namespaces marked for deletion -* ui: clear peer on home logo link [[GH-19549](https://github.com/hashicorp/consul/issues/19549)] -* ui: fix being able to view peered services from non-default namnespaces [[GH-19586](https://github.com/hashicorp/consul/issues/19586)] -* ui: stop manually reconciling services if peering is enabled [[GH-19907](https://github.com/hashicorp/consul/issues/19907)] -* wan-federation: Fix a bug where servers wan-federated through mesh-gateways could crash due to overlapping LAN IP addresses. [[GH-19503](https://github.com/hashicorp/consul/issues/19503)] -* xds: Add configurable `xds_fetch_timeout_ms` option to proxy registrations that allows users to prevent endpoints from dropping when they have proxies with a large number of upstreams. [[GH-19871](https://github.com/hashicorp/consul/issues/19871)] -* xds: ensure child resources are re-sent to Envoy when the parent is updated even if the child already has pending updates. [[GH-19866](https://github.com/hashicorp/consul/issues/19866)] - ## 1.16.4 (December 12, 2023) SECURITY: @@ -210,131 +75,6 @@ BUG FIXES: * xds: Add configurable `xds_fetch_timeout_ms` option to proxy registrations that allows users to prevent endpoints from dropping when they have proxies with a large number of upstreams. [[GH-19871](https://github.com/hashicorp/consul/issues/19871)] * xds: ensure child resources are re-sent to Envoy when the parent is updated even if the child already has pending updates. [[GH-19866](https://github.com/hashicorp/consul/issues/19866)] -## 1.15.8 (December 12, 2023) - -SECURITY: - -* Update `github.com/golang-jwt/jwt/v4` to v4.5.0 to address [PRISMA-2022-0270](https://github.com/golang-jwt/jwt/issues/258). [[GH-19705](https://github.com/hashicorp/consul/issues/19705)] -* Upgrade to use Go 1.20.12. This resolves CVEs - [CVE-2023-45283](https://nvd.nist.gov/vuln/detail/CVE-2023-45283): (`path/filepath`) recognize \??\ as a Root Local Device path prefix (Windows) - [CVE-2023-45284](https://nvd.nist.gov/vuln/detail/CVE-2023-45285): recognize device names with trailing spaces and superscripts (Windows) - [CVE-2023-39326](https://nvd.nist.gov/vuln/detail/CVE-2023-39326): (`net/http`) limit chunked data overhead - [CVE-2023-45285](https://nvd.nist.gov/vuln/detail/CVE-2023-45285): (`cmd/go`) go get may unexpectedly fallback to insecure git [[GH-19840](https://github.com/hashicorp/consul/issues/19840)] - -IMPROVEMENTS: - -* mesh: parse the proxy-defaults protocol when write the config-entry to avoid parsing it when compiling the discovery chain. [[GH-19829](https://github.com/hashicorp/consul/issues/19829)] -* wan-federation: use a hash to diff config entries when replicating in the secondary DC to avoid unnecessary writes.. [[GH-19795](https://github.com/hashicorp/consul/issues/19795)] -* cli: Adds cli support for checking TCP connection for ports. If -ports flag is not given, it will check for - default ports of consul listed here - https://developer.hashicorp.com/consul/docs/install/ports [[GH-18329](https://github.com/hashicorp/consul/issues/18329)] -* connect: Default `stats_flush_interval` to 60 seconds when using the Consul Telemetry Collector, unless custom stats sink are present or an explicit flush interval is configured. 
[[GH-19663](https://github.com/hashicorp/consul/issues/19663)] -* metrics: increment consul.client.rpc.failed if RPC fails because no servers are accessible [[GH-19721](https://github.com/hashicorp/consul/issues/19721)] -* metrics: modify consul.client.rpc metric to exclude internal retries for consistency with consul.client.rpc.exceeded and consul.client.rpc.failed [[GH-19721](https://github.com/hashicorp/consul/issues/19721)] - -BUG FIXES: - -* CLI: fix a panic when deleting a non existing policy by name. [[GH-19679](https://github.com/hashicorp/consul/issues/19679)] -* connect: Solves an issue where two upstream services with the same name in different namespaces were not getting routed to correctly by API Gateways. [[GH-19860](https://github.com/hashicorp/consul/issues/19860)] -* federation: **(Enterprise Only)** Fixed an issue where namespace reconciliation could result into the secondary having dangling instances of namespaces marked for deletion -* ui: only show back-to-hcp link when url is present [[GH-19444](https://github.com/hashicorp/consul/issues/19444)] -* wan-federation: Fix a bug where servers wan-federated through mesh-gateways could crash due to overlapping LAN IP addresses. [[GH-19503](https://github.com/hashicorp/consul/issues/19503)] -* xds: Add configurable `xds_fetch_timeout_ms` option to proxy registrations that allows users to prevent endpoints from dropping when they have proxies with a large number of upstreams. [[GH-19871](https://github.com/hashicorp/consul/issues/19871)] -* xds: ensure child resources are re-sent to Envoy when the parent is updated even if the child already has pending updates. [[GH-19866](https://github.com/hashicorp/consul/issues/19866)] - -## 1.17.0 (October 31, 2023) - -BREAKING CHANGES: - -* api: RaftLeaderTransfer now requires an id string. An empty string can be specified to keep the old behavior. [[GH-17107](https://github.com/hashicorp/consul/issues/17107)] -* audit-logging: **(Enterprise only)** allowing timestamp based filename only on rotation. initially the filename will be just file.json [[GH-18668](https://github.com/hashicorp/consul/issues/18668)] - -DEPRECATIONS: - -* cli: Deprecate the `-admin-access-log-path` flag from `consul connect envoy` command in favor of: `-admin-access-log-config`. [[GH-15946](https://github.com/hashicorp/consul/issues/15946)] - -SECURITY: - -* Update `golang.org/x/net` to v0.17.0 to address [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`x/net/http2`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade Go to 1.20.10. -This resolves vulnerability [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`net/http`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade `google.golang.org/grpc` to 1.56.3. -This resolves vulnerability [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487). [[GH-19414](https://github.com/hashicorp/consul/issues/19414)] -* connect: update supported envoy versions to 1.24.12, 1.25.11, 1.26.6, 1.27.2 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-19275](https://github.com/hashicorp/consul/issues/19275)] - -FEATURE PREVIEW: **Catalog v2** - -This release provides the ability to preview Consul's v2 Catalog and Resource API if enabled. 
The new model supports -multi-port application deployments with only a single Envoy proxy. Note that the v1 and v2 catalogs are not cross -compatible, and not all Consul features are available within this v2 feature preview. See the [v2 Catalog and Resource -API documentation](https://developer.hashicorp.com/consul/docs/architecture/v2) for more information. The v2 Catalog and -Resources API should be considered a feature preview within this release and should not be used in production -environments. - -Limitations -* The v2 catalog API feature preview does not support connections with client agents. As a result, it is only available for Kubernetes deployments, which use [Consul dataplanes](consul/docs/connect/dataplane) instead of client agents. -* The v1 and v2 catalog APIs cannot run concurrently. -* The Consul UI does not support multi-port services or the v2 catalog API in this release. -* HCP Consul does not support multi-port services or the v2 catalog API in this release. - -Significant Pull Requests -* [[Catalog resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/catalog/internal/controllers) -* [[Mesh resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/mesh/internal/controllers) -* [[Auth resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/auth/internal) -* [[V2 Protobufs]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/proto-public) - -FEATURES: - -* Support custom watches on the Consul Controller framework. [[GH-18439](https://github.com/hashicorp/consul/issues/18439)] -* Windows: support consul connect envoy command on Windows [[GH-17694](https://github.com/hashicorp/consul/issues/17694)] -* acl: Add BindRule support for templated policies. Add new BindType: templated-policy and BindVar field for templated policy variables. [[GH-18719](https://github.com/hashicorp/consul/issues/18719)] -* acl: Add new `acl.tokens.dns` config field which specifies the token used implicitly during dns checks. [[GH-17936](https://github.com/hashicorp/consul/issues/17936)] -* acl: Added ACL Templated policies to simplify getting the right ACL token. [[GH-18708](https://github.com/hashicorp/consul/issues/18708)] -* acl: Adds a new ACL rule for workload identities [[GH-18769](https://github.com/hashicorp/consul/issues/18769)] -* acl: Adds workload identity templated policy [[GH-19077](https://github.com/hashicorp/consul/issues/19077)] -* api-gateway: Add support for response header modifiers on http-route configuration entry [[GH-18646](https://github.com/hashicorp/consul/issues/18646)] -* api-gateway: add retry and timeout filters [[GH-18324](https://github.com/hashicorp/consul/issues/18324)] -* cli: Add `bind-var` flag to `consul acl binding-rule` for templated policy variables. [[GH-18719](https://github.com/hashicorp/consul/issues/18719)] -* cli: Add `consul acl templated-policy` commands to read, list and preview templated policies. 
[[GH-18816](https://github.com/hashicorp/consul/issues/18816)] -* config-entry(api-gateway): (Enterprise only) Add GatewayPolicy to APIGateway Config Entry listeners -* config-entry(api-gateway): (Enterprise only) Add JWTFilter to HTTPRoute Filters -* dataplane: Allow getting bootstrap parameters when using V2 APIs [[GH-18504](https://github.com/hashicorp/consul/issues/18504)] -* gateway: **(Enterprise only)** Add JWT authentication and authorization to APIGateway Listeners and HTTPRoutes. -* mesh: **(Enterprise only)** Adds rate limiting config to service-defaults [[GH-18583](https://github.com/hashicorp/consul/issues/18583)] -* xds: Add a built-in Envoy extension that appends OpenTelemetry Access Logging (otel-access-logging) to the HTTP Connection Manager filter. [[GH-18336](https://github.com/hashicorp/consul/issues/18336)] -* xds: Add support for patching outbound listeners to the built-in Envoy External Authorization extension. [[GH-18336](https://github.com/hashicorp/consul/issues/18336)] - -IMPROVEMENTS: - -* raft: upgrade raft-wal library version to 0.4.1. [[GH-19314](https://github.com/hashicorp/consul/issues/19314)] -* xds: Use downstream protocol when connecting to local app [[GH-18573](https://github.com/hashicorp/consul/issues/18573)] -* Windows: Integration tests for Consul Windows VMs [[GH-18007](https://github.com/hashicorp/consul/issues/18007)] -* acl: Use templated policy to generate synthetic policies for tokens/roles with node and/or service identities [[GH-18813](https://github.com/hashicorp/consul/issues/18813)] -* api: added `CheckRegisterOpts` to Agent API [[GH-18943](https://github.com/hashicorp/consul/issues/18943)] -* api: added `Token` field to `ServiceRegisterOpts` type in Agent API [[GH-18983](https://github.com/hashicorp/consul/issues/18983)] -* ca: Vault CA provider config no longer requires root_pki_path for secondary datacenters [[GH-17831](https://github.com/hashicorp/consul/issues/17831)] -* cli: Added `-templated-policy`, `-templated-policy-file`, `-replace-templated-policy`, `-append-templated-policy`, `-replace-templated-policy-file`, `-append-templated-policy-file` and `-var` flags for creating or updating tokens/roles. [[GH-18708](https://github.com/hashicorp/consul/issues/18708)] -* config: Add new `tls.defaults.verify_server_hostname` configuration option. This specifies the default value for any interfaces that support the `verify_server_hostname` option. [[GH-17155](https://github.com/hashicorp/consul/issues/17155)] -* connect: update supported envoy versions to 1.24.10, 1.25.9, 1.26.4, 1.27.0 [[GH-18300](https://github.com/hashicorp/consul/issues/18300)] -* ui: Use Community verbiage [[GH-18560](https://github.com/hashicorp/consul/issues/18560)] - -BUG FIXES: - -* api: add custom marshal/unmarshal for ServiceResolverConfigEntry.RequestTimeout so config entries that set this field can be read using the API. [[GH-19031](https://github.com/hashicorp/consul/issues/19031)] -* ca: ensure Vault CA provider respects Vault Enterprise namespace configuration. [[GH-19095](https://github.com/hashicorp/consul/issues/19095)] -* catalog api: fixes a bug with catalog api where filter query parameter was not working correctly for the `/v1/catalog/services` endpoint [[GH-18322](https://github.com/hashicorp/consul/issues/18322)] -* connect: **(Enterprise only)** Fix bug where incorrect service-defaults entries were fetched to determine an upstream's protocol whenever the upstream did not explicitly define the namespace / partition. 
When this bug occurs, upstreams would use the protocol from a service-default entry in the default namespace / partition, rather than their own namespace / partition. -* connect: Fix bug where uncleanly closed xDS connections would influence connection balancing for too long and prevent envoy instances from starting. Two new configuration fields -`performance.grpc_keepalive_timeout` and `performance.grpc_keepalive_interval` now exist to allow for configuration on how often these dead connections will be cleaned up. [[GH-19339](https://github.com/hashicorp/consul/issues/19339)] -* dev-mode: Fix dev mode has new line in responses. Now new line is added only when url has pretty query parameter. [[GH-18367](https://github.com/hashicorp/consul/issues/18367)] -* dns: **(Enterprise only)** Fix bug where sameness group queries did not correctly inherit the agent's partition. -* docs: fix list of telemetry metrics [[GH-17593](https://github.com/hashicorp/consul/issues/17593)] -* gateways: Fix a bug where a service in a peered datacenter could not access an external node service through a terminating gateway [[GH-18959](https://github.com/hashicorp/consul/issues/18959)] -* server: **(Enterprise Only)** Fixed an issue where snake case keys were rejected when configuring the control-plane-request-limit config entry -* telemetry: emit consul version metric on a regular interval. [[GH-6876](https://github.com/hashicorp/consul/issues/6876)] -* tlsutil: Default setting of ServerName field in outgoing TLS configuration for checks now handled by crypto/tls. [[GH-17481](https://github.com/hashicorp/consul/issues/17481)] - ## 1.16.3 (October 31, 2023) SECURITY: @@ -363,126 +103,6 @@ BUG FIXES: * gateways: Fix a bug where a service in a peered datacenter could not access an external node service through a terminating gateway [[GH-18959](https://github.com/hashicorp/consul/issues/18959)] * server: **(Enterprise Only)** Fixed an issue where snake case keys were rejected when configuring the control-plane-request-limit config entry -## 1.15.7 (October 31, 2023) - -SECURITY: - -* Update `golang.org/x/net` to v0.17.0 to address [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`x/net/http2`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade Go to 1.20.10. -This resolves vulnerability [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`net/http`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade `google.golang.org/grpc` to 1.56.3. -This resolves vulnerability [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487). [[GH-19414](https://github.com/hashicorp/consul/issues/19414)] -* connect: update supported envoy versions to 1.24.12, 1.25.11 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-19272](https://github.com/hashicorp/consul/issues/19272)] - -BUG FIXES: - -* Mesh Gateways: Fix a bug where replicated and peered mesh gateways with hostname-based WAN addresses fail to initialize. [[GH-19268](https://github.com/hashicorp/consul/issues/19268)] -* api: add custom marshal/unmarshal for ServiceResolverConfigEntry.RequestTimeout so config entries that set this field can be read using the API. 
[[GH-19031](https://github.com/hashicorp/consul/issues/19031)] -* ca: Fix bug with Vault CA provider where renewing a retracted token would cause retries in a tight loop, degrading performance. [[GH-19285](https://github.com/hashicorp/consul/issues/19285)] -* ca: Fix bug with Vault CA provider where token renewal goroutines could leak if CA failed to initialize. [[GH-19285](https://github.com/hashicorp/consul/issues/19285)] -* ca: ensure Vault CA provider respects Vault Enterprise namespace configuration. [[GH-19095](https://github.com/hashicorp/consul/issues/19095)] -* catalog api: fixes a bug with catalog api where filter query parameter was not working correctly for the `/v1/catalog/services` endpoint [[GH-18322](https://github.com/hashicorp/consul/issues/18322)] -* connect: Fix bug where uncleanly closed xDS connections would influence connection balancing for too long and prevent envoy instances from starting. Two new configuration fields -`performance.grpc_keepalive_timeout` and `performance.grpc_keepalive_interval` now exist to allow for configuration on how often these dead connections will be cleaned up. [[GH-19339](https://github.com/hashicorp/consul/issues/19339)] -* gateways: Fix a bug where a service in a peered datacenter could not access an external node service through a terminating gateway [[GH-18959](https://github.com/hashicorp/consul/issues/18959)] - -## 1.14.11 (October 31, 2023) - -SECURITY: - -* Update `golang.org/x/net` to v0.17.0 to address [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`x/net/http2`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade Go to 1.20.10. -This resolves vulnerability [CVE-2023-39325](https://nvd.nist.gov/vuln/detail/CVE-2023-39325) -/ [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487)(`net/http`). [[GH-19225](https://github.com/hashicorp/consul/issues/19225)] -* Upgrade `google.golang.org/grpc` to 1.56.3. -This resolves vulnerability [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487). [[GH-19414](https://github.com/hashicorp/consul/issues/19414)] -* connect: update supported envoy versions to 1.24.12 to address [CVE-2023-44487](https://github.com/envoyproxy/envoy/security/advisories/GHSA-jhv4-f7mr-xx76) [[GH-19271](https://github.com/hashicorp/consul/issues/19271)] - -BUG FIXES: - -* Mesh Gateways: Fix a bug where replicated and peered mesh gateways with hostname-based WAN addresses fail to initialize. [[GH-19268](https://github.com/hashicorp/consul/issues/19268)] -* api: add custom marshal/unmarshal for ServiceResolverConfigEntry.RequestTimeout so config entries that set this field can be read using the API. [[GH-19031](https://github.com/hashicorp/consul/issues/19031)] -* ca: ensure Vault CA provider respects Vault Enterprise namespace configuration. [[GH-19095](https://github.com/hashicorp/consul/issues/19095)] -* catalog api: fixes a bug with catalog api where filter query parameter was not working correctly for the `/v1/catalog/services` endpoint [[GH-18322](https://github.com/hashicorp/consul/issues/18322)] -* connect: Fix bug where uncleanly closed xDS connections would influence connection balancing for too long and prevent envoy instances from starting. Two new configuration fields -`performance.grpc_keepalive_timeout` and `performance.grpc_keepalive_interval` now exist to allow for configuration on how often these dead connections will be cleaned up. 
[[GH-19339](https://github.com/hashicorp/consul/issues/19339)] - -## 1.17.0-rc1 (October 11, 2023) - -BREAKING CHANGES: - -* api: RaftLeaderTransfer now requires an id string. An empty string can be specified to keep the old behavior. [[GH-17107](https://github.com/hashicorp/consul/issues/17107)] -* audit-logging: **(Enterprise only)** allowing timestamp based filename only on rotation. initially the filename will be just file.json [[GH-18668](https://github.com/hashicorp/consul/issues/18668)] - -FEATURE PREVIEW: **Catalog v2** - -This release provides the ability to preview Consul's v2 Catalog and Resource API if enabled. The new model supports -multi-port application deployments with only a single Envoy proxy. Note that the v1 and v2 catalogs are not cross -compatible, and not all Consul features are available within this v2 feature preview. See the [v2 Catalog and Resource -API documentation](https://developer.hashicorp.com/consul/docs/architecture/v2) for more information. The v2 Catalog and -Resources API should be considered a feature preview within this release and should not be used in production -environments. - -Limitations -* The v2 catalog API feature preview does not support connections with client agents. As a result, it is only available for Kubernetes deployments, which use [Consul dataplanes](consul/docs/connect/dataplane) instead of client agents. -* The v1 and v2 catalog APIs cannot run concurrently. -* The Consul UI does not support multi-port services or the v2 catalog API in this release. -* HCP Consul does not support multi-port services or the v2 catalog API in this release. -* The v2 API only supports transparent proxy mode where services that have permissions to connect to each other can use - Kube DNS to connect. - -Known Issues -* When using the v2 API with transparent proxy, Kubernetes pods cannot use L7 liveness, readiness, or startup probes. - -Significant Pull Requests -* [[Catalog resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/catalog/internal/controllers) -* [[Mesh resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/mesh/internal/controllers) -* [[Auth resource controllers]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/internal/auth/internal) -* [[V2 Protobufs]](https://github.com/hashicorp/consul/tree/e6b724d06249d3e62cd75afe3ee6042ba1fd5415/proto-public) - -FEATURES: - -* Support custom watches on the Consul Controller framework. [[GH-18439](https://github.com/hashicorp/consul/issues/18439)] -* Windows: support consul connect envoy command on Windows [[GH-17694](https://github.com/hashicorp/consul/issues/17694)] -* acl: Add BindRule support for templated policies. Add new BindType: templated-policy and BindVar field for templated policy variables. [[GH-18719](https://github.com/hashicorp/consul/issues/18719)] -* acl: Add new `acl.tokens.dns` config field which specifies the token used implicitly during dns checks. [[GH-17936](https://github.com/hashicorp/consul/issues/17936)] -* acl: Added ACL Templated policies to simplify getting the right ACL token. 
[[GH-18708](https://github.com/hashicorp/consul/issues/18708)] -* acl: Adds a new ACL rule for workload identities [[GH-18769](https://github.com/hashicorp/consul/issues/18769)] -* api-gateway: Add support for response header modifiers on http-route configuration entry [[GH-18646](https://github.com/hashicorp/consul/issues/18646)] -* api-gateway: add retry and timeout filters [[GH-18324](https://github.com/hashicorp/consul/issues/18324)] -* cli: Add `bind-var` flag to `consul acl binding-rule` for templated policy variables. [[GH-18719](https://github.com/hashicorp/consul/issues/18719)] -* cli: Add `consul acl templated-policy` commands to read, list and preview templated policies. [[GH-18816](https://github.com/hashicorp/consul/issues/18816)] -* config-entry(api-gateway): (Enterprise only) Add GatewayPolicy to APIGateway Config Entry listeners -* config-entry(api-gateway): (Enterprise only) Add JWTFilter to HTTPRoute Filters -* dataplane: Allow getting bootstrap parameters when using V2 APIs [[GH-18504](https://github.com/hashicorp/consul/issues/18504)] -* gateway: **(Enterprise only)** Add JWT authentication and authorization to APIGateway Listeners and HTTPRoutes. -* mesh: **(Enterprise only)** Adds rate limiting config to service-defaults [[GH-18583](https://github.com/hashicorp/consul/issues/18583)] -* xds: Add a built-in Envoy extension that appends OpenTelemetry Access Logging (otel-access-logging) to the HTTP Connection Manager filter. [[GH-18336](https://github.com/hashicorp/consul/issues/18336)] -* xds: Add support for patching outbound listeners to the built-in Envoy External Authorization extension. [[GH-18336](https://github.com/hashicorp/consul/issues/18336)] - -IMPROVEMENTS: - -* xds: Use downstream protocol when connecting to local app [[GH-18573](https://github.com/hashicorp/consul/issues/18573)] -* Windows: Integration tests for Consul Windows VMs [[GH-18007](https://github.com/hashicorp/consul/issues/18007)] -* acl: Use templated policy to generate synthetic policies for tokens/roles with node and/or service identities [[GH-18813](https://github.com/hashicorp/consul/issues/18813)] -* api: added `CheckRegisterOpts` to Agent API [[GH-18943](https://github.com/hashicorp/consul/issues/18943)] -* api: added `Token` field to `ServiceRegisterOpts` type in Agent API [[GH-18983](https://github.com/hashicorp/consul/issues/18983)] -* ca: Vault CA provider config no longer requires root_pki_path for secondary datacenters [[GH-17831](https://github.com/hashicorp/consul/issues/17831)] -* cli: Added `-templated-policy`, `-templated-policy-file`, `-replace-templated-policy`, `-append-templated-policy`, `-replace-templated-policy-file`, `-append-templated-policy-file` and `-var` flags for creating or updating tokens/roles. [[GH-18708](https://github.com/hashicorp/consul/issues/18708)] -* config: Add new `tls.defaults.verify_server_hostname` configuration option. This specifies the default value for any interfaces that support the `verify_server_hostname` option. [[GH-17155](https://github.com/hashicorp/consul/issues/17155)] -* connect: update supported envoy versions to 1.24.10, 1.25.9, 1.26.4, 1.27.0 [[GH-18300](https://github.com/hashicorp/consul/issues/18300)] -* ui: Use Community verbiage [[GH-18560](https://github.com/hashicorp/consul/issues/18560)] - -BUG FIXES: - -* api: add custom marshal/unmarshal for ServiceResolverConfigEntry.RequestTimeout so config entries that set this field can be read using the API. 
[[GH-19031](https://github.com/hashicorp/consul/issues/19031)] -* dev-mode: Fix dev mode has new line in responses. Now new line is added only when url has pretty query parameter. [[GH-18367](https://github.com/hashicorp/consul/issues/18367)] -* telemetry: emit consul version metric on a regular interval. [[GH-6876](https://github.com/hashicorp/consul/issues/6876)] -* tlsutil: Default setting of ServerName field in outgoing TLS configuration for checks now handled by crypto/tls. [[GH-17481](https://github.com/hashicorp/consul/issues/17481)] - ## 1.16.2 (September 19, 2023) SECURITY: @@ -520,64 +140,6 @@ BUG FIXES: * gateways: Fix a bug where gateway to service mappings weren't being cleaned up properly when externally registered proxies were being deregistered. [[GH-18831](https://github.com/hashicorp/consul/issues/18831)] * telemetry: emit consul version metric on a regular interval. [[GH-18724](https://github.com/hashicorp/consul/issues/18724)] -## 1.15.6 (September 19, 2023) - -SECURITY: - -* Upgrade to use Go 1.20.8. This resolves CVEs -[CVE-2023-39320](https://github.com/advisories/GHSA-rxv8-v965-v333) (`cmd/go`), -[CVE-2023-39318](https://github.com/advisories/GHSA-vq7j-gx56-rxjh) (`html/template`), -[CVE-2023-39319](https://github.com/advisories/GHSA-vv9m-32rr-3g55) (`html/template`), -[CVE-2023-39321](https://github.com/advisories/GHSA-9v7r-x7cv-v437) (`crypto/tls`), and -[CVE-2023-39322](https://github.com/advisories/GHSA-892h-r6cr-53g4) (`crypto/tls`) [[GH-18742](https://github.com/hashicorp/consul/issues/18742)] - -IMPROVEMENTS: - -* Adds flag -append-filename (which works on values version, dc, node and status) to consul snapshot save command. -Adding the flag -append-filename version,dc,node,status will add consul version, consul datacenter, node name and leader/follower -(status) in the file name given in the snapshot save command before the file extension. [[GH-18625](https://github.com/hashicorp/consul/issues/18625)] -* Reduce the frequency of metric exports from Consul to HCP from every 10s to every 1m [[GH-18584](https://github.com/hashicorp/consul/issues/18584)] -* api: Add support for listing ACL tokens by service name. [[GH-18667](https://github.com/hashicorp/consul/issues/18667)] -* command: Adds -since flag in consul debug command which internally calls hcdiag for debug information in the past. [[GH-18797](https://github.com/hashicorp/consul/issues/18797)] -* log: Currently consul logs files like this consul-{timestamp}.log. This change makes sure that there is always -consul.log file with the latest logs in it. [[GH-18617](https://github.com/hashicorp/consul/issues/18617)] - -BUG FIXES: - -* api: Fix `/v1/agent/self` not returning latest configuration [[GH-18681](https://github.com/hashicorp/consul/issues/18681)] -* ca: Vault provider now cleans up the previous Vault issuer and key when generating a new leaf signing certificate [[GH-18779](https://github.com/hashicorp/consul/issues/18779)] [[GH-18773](https://github.com/hashicorp/consul/issues/18773)] -* check: prevent go routine leakage when existing Defercheck of same check id is not nil [[GH-18558](https://github.com/hashicorp/consul/issues/18558)] -* gateways: Fix a bug where gateway to service mappings weren't being cleaned up properly when externally registered proxies were being deregistered. [[GH-18831](https://github.com/hashicorp/consul/issues/18831)] -* telemetry: emit consul version metric on a regular interval. 
[[GH-18724](https://github.com/hashicorp/consul/issues/18724)] - -## 1.14.10 (September 19, 2023) - -SECURITY: - -* Upgrade to use Go 1.20.8. This resolves CVEs -[CVE-2023-39320](https://github.com/advisories/GHSA-rxv8-v965-v333) (`cmd/go`), -[CVE-2023-39318](https://github.com/advisories/GHSA-vq7j-gx56-rxjh) (`html/template`), -[CVE-2023-39319](https://github.com/advisories/GHSA-vv9m-32rr-3g55) (`html/template`), -[CVE-2023-39321](https://github.com/advisories/GHSA-9v7r-x7cv-v437) (`crypto/tls`), and -[CVE-2023-39322](https://github.com/advisories/GHSA-892h-r6cr-53g4) (`crypto/tls`) [[GH-18742](https://github.com/hashicorp/consul/issues/18742)] - -IMPROVEMENTS: - -* Adds flag -append-filename (which works on values version, dc, node and status) to consul snapshot save command. -Adding the flag -append-filename version,dc,node,status will add consul version, consul datacenter, node name and leader/follower -(status) in the file name given in the snapshot save command before the file extension. [[GH-18625](https://github.com/hashicorp/consul/issues/18625)] -* api: Add support for listing ACL tokens by service name. [[GH-18667](https://github.com/hashicorp/consul/issues/18667)] -* command: Adds -since flag in consul debug command which internally calls hcdiag for debug information in the past. [[GH-18797](https://github.com/hashicorp/consul/issues/18797)] -* log: Currently consul logs files like this consul-{timestamp}.log. This change makes sure that there is always -consul.log file with the latest logs in it. [[GH-18617](https://github.com/hashicorp/consul/issues/18617)] - -BUG FIXES: - -* api: Fix `/v1/agent/self` not returning latest configuration [[GH-18681](https://github.com/hashicorp/consul/issues/18681)] -* ca: Vault provider now cleans up the previous Vault issuer and key when generating a new leaf signing certificate [[GH-18779](https://github.com/hashicorp/consul/issues/18779)] [[GH-18773](https://github.com/hashicorp/consul/issues/18773)] -* gateways: Fix a bug where gateway to service mappings weren't being cleaned up properly when externally registered proxies were being deregistered. [[GH-18831](https://github.com/hashicorp/consul/issues/18831)] -* telemetry: emit consul version metric on a regular interval. [[GH-18724](https://github.com/hashicorp/consul/issues/18724)] - ## 1.16.1 (August 8, 2023) KNOWN ISSUES: @@ -641,83 +203,6 @@ we now reject those earlier in the process when we validate the certificate. [[G https://github.com/rboyer/safeio/pull/3 [[GH-18302](https://github.com/hashicorp/consul/issues/18302)] * xds: Prevent partial application of non-Required Envoy extensions in the case of failure. [[GH-18068](https://github.com/hashicorp/consul/issues/18068)] -## 1.15.5 (August 8, 2023) - -SECURITY: - -* Update `golang.org/x/net` to v0.13.0 to address [CVE-2023-3978](https://nvd.nist.gov/vuln/detail/CVE-2023-3978). [[GH-18358](https://github.com/hashicorp/consul/issues/18358)] -* Upgrade golang.org/x/net to address [CVE-2023-29406](https://nvd.nist.gov/vuln/detail/CVE-2023-29406) [[GH-18186](https://github.com/hashicorp/consul/issues/18186)] -* Upgrade to use Go 1.20.6. -This resolves [CVE-2023-29406](https://github.com/advisories/GHSA-f8f7-69v5-w4vx)(`net/http`) for uses of the standard library. -A separate change updates dependencies on `golang.org/x/net` to use `0.12.0`. [[GH-18190](https://github.com/hashicorp/consul/issues/18190)] -* Upgrade to use Go 1.20.7. 
-This resolves vulnerability [CVE-2023-29409](https://nvd.nist.gov/vuln/detail/CVE-2023-29409)(`crypto/tls`). [[GH-18358](https://github.com/hashicorp/consul/issues/18358)] - -FEATURES: - -* cli: `consul members` command uses `-filter` expression to filter members based on bexpr. [[GH-18223](https://github.com/hashicorp/consul/issues/18223)] -* cli: `consul watch` command uses `-filter` expression to filter response from checks, services, nodes, and service. [[GH-17780](https://github.com/hashicorp/consul/issues/17780)] -* reloadable config: Made enable_debug config reloadable and enable pprof command to work when config toggles to true [[GH-17565](https://github.com/hashicorp/consul/issues/17565)] - -IMPROVEMENTS: - -* Fix some typos in metrics docs [[GH-18080](https://github.com/hashicorp/consul/issues/18080)] -* acl: added builtin ACL policy that provides global read-only access (builtin/global-read-only) [[GH-18319](https://github.com/hashicorp/consul/issues/18319)] -* acl: allow for a single slash character in policy names [[GH-18319](https://github.com/hashicorp/consul/issues/18319)] -* connect: Add capture group labels from Envoy cluster FQDNs to Envoy exported metric labels [[GH-17888](https://github.com/hashicorp/consul/issues/17888)] -* connect: update supported envoy versions to 1.22.11, 1.23.12, 1.24.10, 1.25.9 [[GH-18304](https://github.com/hashicorp/consul/issues/18304)] -* hcp: Add dynamic configuration support for the export of server metrics to HCP. [[GH-18168](https://github.com/hashicorp/consul/issues/18168)] -* hcp: Removes requirement for HCP to provide a management token [[GH-18140](https://github.com/hashicorp/consul/issues/18140)] -* xds: Explicitly enable WebSocket connection upgrades in HTTP connection manager [[GH-18150](https://github.com/hashicorp/consul/issues/18150)] - -BUG FIXES: - -* Fix a bug that wrongly trims domains when there is an overlap with DC name. [[GH-17160](https://github.com/hashicorp/consul/issues/17160)] -* api-gateway: fix race condition in proxy config generation when Consul is notified of the bound-api-gateway config entry before it is notified of the api-gateway config entry. [[GH-18291](https://github.com/hashicorp/consul/issues/18291)] -* connect/ca: Fixes a bug preventing CA configuration updates in secondary datacenters [[GH-17846](https://github.com/hashicorp/consul/issues/17846)] -* connect: Fix incorrect protocol config merging for transparent proxy implicit upstreams. [[GH-17894](https://github.com/hashicorp/consul/issues/17894)] -* connect: Removes the default health check from the `consul connect envoy` command when starting an API Gateway. -This health check would always fail. [[GH-18011](https://github.com/hashicorp/consul/issues/18011)] -* connect: fix a bug with Envoy potentially starting with incomplete configuration by not waiting enough for initial xDS configuration. [[GH-18024](https://github.com/hashicorp/consul/issues/18024)] -* snapshot: fix access denied and handle is invalid when we call snapshot save on windows - skip sync() for folders in windows in -https://github.com/rboyer/safeio/pull/3 [[GH-18302](https://github.com/hashicorp/consul/issues/18302)] - -## 1.14.9 (August 8, 2023) - -SECURITY: - -* Update `golang.org/x/net` to v0.13.0 to address [CVE-2023-3978](https://nvd.nist.gov/vuln/detail/CVE-2023-3978). 
[[GH-18358](https://github.com/hashicorp/consul/issues/18358)] -* Upgrade golang.org/x/net to address [CVE-2023-29406](https://nvd.nist.gov/vuln/detail/CVE-2023-29406) [[GH-18186](https://github.com/hashicorp/consul/issues/18186)] -* Upgrade to use Go 1.20.6. -This resolves [CVE-2023-29406](https://github.com/advisories/GHSA-f8f7-69v5-w4vx)(`net/http`) for uses of the standard library. -A separate change updates dependencies on `golang.org/x/net` to use `0.12.0`. [[GH-18190](https://github.com/hashicorp/consul/issues/18190)] -* Upgrade to use Go 1.20.7. -This resolves vulnerability [CVE-2023-29409](https://nvd.nist.gov/vuln/detail/CVE-2023-29409)(`crypto/tls`). [[GH-18358](https://github.com/hashicorp/consul/issues/18358)] - -FEATURES: - -* cli: `consul members` command uses `-filter` expression to filter members based on bexpr. [[GH-18223](https://github.com/hashicorp/consul/issues/18223)] -* cli: `consul watch` command uses `-filter` expression to filter response from checks, services, nodes, and service. [[GH-17780](https://github.com/hashicorp/consul/issues/17780)] -* reloadable config: Made enable_debug config reloadable and enable pprof command to work when config toggles to true [[GH-17565](https://github.com/hashicorp/consul/issues/17565)] - -IMPROVEMENTS: - -* Fix some typos in metrics docs [[GH-18080](https://github.com/hashicorp/consul/issues/18080)] -* acl: added builtin ACL policy that provides global read-only access (builtin/global-read-only) [[GH-18319](https://github.com/hashicorp/consul/issues/18319)] -* acl: allow for a single slash character in policy names [[GH-18319](https://github.com/hashicorp/consul/issues/18319)] -* connect: update supported envoy versions to 1.21.6, 1.22.11, 1.23.12, 1.24.10 [[GH-18305](https://github.com/hashicorp/consul/issues/18305)] -* hcp: Removes requirement for HCP to provide a management token [[GH-18140](https://github.com/hashicorp/consul/issues/18140)] -* xds: Explicitly enable WebSocket connection upgrades in HTTP connection manager [[GH-18150](https://github.com/hashicorp/consul/issues/18150)] - -BUG FIXES: - -* Fix a bug that wrongly trims domains when there is an overlap with DC name. [[GH-17160](https://github.com/hashicorp/consul/issues/17160)] -* connect/ca: Fixes a bug preventing CA configuration updates in secondary datacenters [[GH-17846](https://github.com/hashicorp/consul/issues/17846)] -* connect: Fix incorrect protocol config merging for transparent proxy implicit upstreams. [[GH-17894](https://github.com/hashicorp/consul/issues/17894)] -* connect: fix a bug with Envoy potentially starting with incomplete configuration by not waiting enough for initial xDS configuration. [[GH-18024](https://github.com/hashicorp/consul/issues/18024)] -* snapshot: fix access denied and handle is invalid when we call snapshot save on windows - skip sync() for folders in windows in -https://github.com/rboyer/safeio/pull/3 [[GH-18302](https://github.com/hashicorp/consul/issues/18302)] - ## 1.16.0 (June 26, 2023) KNOWN ISSUES: @@ -795,100 +280,6 @@ BUG FIXES: * ui: fixes ui tests run on CI [[GH-16428](https://github.com/hashicorp/consul/issues/16428)] * xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)] -## 1.15.4 (June 26, 2023) -FEATURES: - -* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. 
[[GH-17582](https://github.com/hashicorp/consul/issues/17582)] -* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* connect: update supported envoy versions to 1.22.11, 1.23.9, 1.24.7, 1.25.6 [[GH-17545](https://github.com/hashicorp/consul/issues/17545)] -* debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] -* gateway: Change status condition reason for invalid certificate on a listener from "Accepted" to "ResolvedRefs". [[GH-17115](https://github.com/hashicorp/consul/issues/17115)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* docs: fix list of telemetry metrics [[GH-17593](https://github.com/hashicorp/consul/issues/17593)] -* gateways: **(Enterprise only)** Fixed a bug in API gateways where gateway configuration objects in non-default partitions did not reconcile properly. [[GH-17581](https://github.com/hashicorp/consul/issues/17581)] -* gateways: Fixed a bug in API gateways where binding a route that only targets a service imported from a peer results - in the programmed gateway having no routes. [[GH-17609](https://github.com/hashicorp/consul/issues/17609)] -* gateways: Fixed a bug where API gateways were not being taken into account in determining xDS rate limits. [[GH-17631](https://github.com/hashicorp/consul/issues/17631)] -* http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token), no longer requires `AccessorID` in the request body. Web UI can now update tokens. [[GH-17739](https://github.com/hashicorp/consul/issues/17739)] -* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] -* xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)] - -## 1.14.8 (June 26, 2023) - -SECURITY: - -* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] - -FEATURES: - -* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)] -* server: **(Enterprise Only)** allow automatic license utilization reporting. 
[[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* connect: update supported envoy versions to 1.21.6, 1.22.11, 1.23.9, 1.24.7 [[GH-17547](https://github.com/hashicorp/consul/issues/17547)] -* debug: change default setting of consul debug command. now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] -* peering: gRPC queries for TrustBundleList, TrustBundleRead, PeeringList, and PeeringRead now support blocking semantics, - reducing network and CPU demand. - The HTTP APIs for Peering List and Read have been updated to support blocking. [[GH-17426](https://github.com/hashicorp/consul/issues/17426)] -* raft: Remove expensive reflection from raft/mesh hot path [[GH-16552](https://github.com/hashicorp/consul/issues/16552)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* connect: reverts #17317 fix that caused a downstream error for Ingress/Mesh/Terminating GWs when their respective config entry does not already exist. [[GH-17541](https://github.com/hashicorp/consul/issues/17541)] -* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. - This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] -* peering: Fix issue where modifying the list of exported services did not correctly replicate changes for services that exist in a non-default namespace. [[GH-17456](https://github.com/hashicorp/consul/issues/17456)] - -## 1.13.9 (June 26, 2023) -BREAKING CHANGES: - -* connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling - queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not - recommended for use in production environments. If you still wish to use the experimental peering feature, ensure - [`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled) - is set on all clients and servers. [[GH-17731](https://github.com/hashicorp/consul/issues/17731)] - -SECURITY: - -* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] - -FEATURES: - -* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] - -IMPROVEMENTS: - -* debug: change default setting of consul debug command. 
now default duration is 5ms and default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] -* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] - -BUG FIXES: - -* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] -* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. - Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. -* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. - This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. -* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] - ## 1.16.0-rc1 (June 12, 2023) BREAKING CHANGES: @@ -1906,24 +1297,6 @@ NOTES: * ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)] -## 1.11.6 (May 25, 2022) - -IMPROVEMENTS: - -* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids - -BUG FIXES: - -* Fix a bug when configuring an `add_headers` directive named `Host` the header is not set for `v1/internal/ui/metrics-proxy/` endpoint. [[GH-13071](https://github.com/hashicorp/consul/issues/13071)] -* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)] -* ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block [[GH-12820](https://github.com/hashicorp/consul/issues/12820)] -* health: ensure /v1/health/service/:service endpoint returns the most recent results when a filter is used with streaming #12640 [[GH-12640](https://github.com/hashicorp/consul/issues/12640)] -* snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval. - -NOTES: - -* ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)] - ## 1.12.0 (April 20, 2022) BREAKING CHANGES: @@ -2008,6 +1381,24 @@ NOTES: * Forked net/rpc to add middleware support: https://github.com/hashicorp/consul-net-rpc/ . [[GH-12311](https://github.com/hashicorp/consul/issues/12311)] * dependency: Upgrade to use Go 1.18.1 [[GH-12808](https://github.com/hashicorp/consul/issues/12808)] +## 1.11.6 (May 25, 2022) + +IMPROVEMENTS: + +* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids + +BUG FIXES: + +* Fix a bug when configuring an `add_headers` directive named `Host` the header is not set for `v1/internal/ui/metrics-proxy/` endpoint. [[GH-13071](https://github.com/hashicorp/consul/issues/13071)] +* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. 
[[GH-1368](https://github.com/hashicorp/consul/issues/1368)] +* ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block [[GH-12820](https://github.com/hashicorp/consul/issues/12820)] +* health: ensure /v1/health/service/:service endpoint returns the most recent results when a filter is used with streaming #12640 [[GH-12640](https://github.com/hashicorp/consul/issues/12640)] +* snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval. + +NOTES: + +* ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)] + ## 1.11.5 (April 13, 2022) SECURITY: diff --git a/Dockerfile b/Dockerfile index faeb85810fae2..24c1bee803d9d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 # This Dockerfile contains multiple targets. # Use 'docker build --target= .' to build one. @@ -233,7 +233,7 @@ COPY LICENSE /licenses/mozilla.txt # Set up certificates and base tools. # dumb-init is downloaded directly from GitHub because there's no RPM package. -# Its shasum is hardcoded. If you upgrade the dumb-init version you'll need to +# Its shasum is hardcoded. If you upgrade the dumb-init verion you'll need to # also update the shasum. RUN set -eux && \ microdnf install -y ca-certificates shadow-utils gnupg libcap openssl iputils jq iptables wget unzip tar && \ diff --git a/Dockerfile-windows b/Dockerfile-windows deleted file mode 100644 index 14582908db550..0000000000000 --- a/Dockerfile-windows +++ /dev/null @@ -1,51 +0,0 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 -ARG VERSION=1.16.0 - -ENV chocolateyVersion=1.4.0 - -LABEL org.opencontainers.image.authors="Consul Team " \ - org.opencontainers.image.url="https://www.consul.io/" \ - org.opencontainers.image.documentation="https://www.consul.io/docs" \ - org.opencontainers.image.source="https://github.com/hashicorp/consul" \ - org.opencontainers.image.version=$VERSION \ - org.opencontainers.image.vendor="HashiCorp" \ - org.opencontainers.image.title="consul" \ - org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration." \ - version=${VERSION} - -RUN ["powershell", "Set-ExecutionPolicy", "Bypass", "-Scope", "Process", "-Force;"] -RUN ["powershell", "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))"] - -RUN choco install git.install -yf -RUN SETX /M path "%PATH%;C:\Program Files\Git\bin" - -RUN mkdir C:\\consul -RUN mkdir C:\\consul\\data -RUN mkdir C:\\consul\\config - -# Server RPC is used for communication between Consul clients and servers for internal -# request forwarding. -EXPOSE 8300 - -# Serf LAN and WAN (WAN is used only by Consul servers) are used for gossip between -# Consul agents. LAN is within the datacenter and WAN is between just the Consul -# servers in all datacenters. -EXPOSE 8301 8301/udp 8302 8302/udp - -# HTTP and DNS (both TCP and UDP) are the primary interfaces that applications -# use to interact with Consul. 
-EXPOSE 8500 8600 8600/udp - -#ENV CONSUL_URL=https://releases.hashicorp.com/consul/${VERSION}/consul_${VERSION}_windows_amd64.zip -#RUN curl %CONSUL_URL% -L -o consul.zip -#RUN tar -xf consul.zip -C consul - -COPY consul.exe C:\\consul - -COPY .release/docker/docker-entrypoint-windows.sh C:\\docker-entrypoint-windows.sh -ENTRYPOINT ["bash.exe", "docker-entrypoint-windows.sh"] - -# By default you'll get an insecure single-node development server that stores -# everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself. -# Don't use this configuration for production. -CMD ["agent", "-dev", "-client", "0.0.0.0"] diff --git a/Makefile b/GNUmakefile similarity index 66% rename from Makefile rename to GNUmakefile index 3618f17df886a..b9133befe3628 100644 --- a/Makefile +++ b/GNUmakefile @@ -3,7 +3,6 @@ SHELL = bash - GO_MODULES := $(shell find . -name go.mod -exec dirname {} \; | grep -v "proto-gen-rpc-glue/e2e" | sort) ### @@ -11,18 +10,15 @@ GO_MODULES := $(shell find . -name go.mod -exec dirname {} \; | grep -v "proto-g # or the string @DEV to imply use what is currently installed locally. ### GOLANGCI_LINT_VERSION='v1.55.2' -MOCKERY_VERSION='v2.41.0' -BUF_VERSION='v1.26.0' +MOCKERY_VERSION='v2.20.0' +BUF_VERSION='v1.14.0' -PROTOC_GEN_GO_GRPC_VERSION='v1.2.0' -MOG_VERSION='v0.4.1' +PROTOC_GEN_GO_GRPC_VERSION="v1.2.0" +MOG_VERSION='v0.4.0' PROTOC_GO_INJECT_TAG_VERSION='v1.3.0' -PROTOC_GEN_GO_BINARY_VERSION='v0.1.0' +PROTOC_GEN_GO_BINARY_VERSION="v0.1.0" DEEP_COPY_VERSION='bc3f5aa5735d8a54961580a3a24422c308c831c2' -COPYWRITE_TOOL_VERSION='v0.16.4' -LINT_CONSUL_RETRY_VERSION='v1.4.0' -# Go imports formatter -GCI_VERSION='v0.11.2' +LINT_CONSUL_RETRY_VERSION='v1.3.0' MOCKED_PB_DIRS= pbdns @@ -71,11 +67,15 @@ CONSUL_IMAGE_VERSION?=latest # When changing the method of Go version detection, also update # version detection in CI workflows (reusable-get-go-version.yml). 
GOLANG_VERSION?=$(shell head -n 1 .go-version) -ENVOY_VERSION?='1.28.0' -CONSUL_DATAPLANE_IMAGE := $(or $(CONSUL_DATAPLANE_IMAGE),"docker.io/hashicorppreview/consul-dataplane:1.3-dev-ubi") -DEPLOYER_CONSUL_DATAPLANE_IMAGE := $(or $(DEPLOYER_CONSUL_DATAPLANE_IMAGE), "docker.io/hashicorppreview/consul-dataplane:1.3-dev") +ENVOY_VERSION?='1.25.4' -CONSUL_VERSION?=$(shell cat version/VERSION) +################ +# CI Variables # +################ +CI_DEV_DOCKER_NAMESPACE?=hashicorpdev +CI_DEV_DOCKER_IMAGE_NAME?=consul +CI_DEV_DOCKER_WORKDIR?=bin/ +################ TEST_MODCACHE?=1 TEST_BUILDCACHE?=1 @@ -155,27 +155,23 @@ ifdef SKIP_DOCKER_BUILD ENVOY_INTEG_DEPS=noop endif -##@ Build - -.PHONY: all -all: dev-build ## Command running by default +all: dev-build # used to make integration dependencies conditional noop: ; -.PHONY: dev -dev: dev-build ## Dev creates binaries for testing locally - these are put into ./bin +# dev creates binaries for testing locally - these are put into ./bin +dev: dev-build -.PHONY: dev-build -dev-build: ## Same as dev +dev-build: mkdir -p bin CGO_ENABLED=0 go install -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)" # rm needed due to signature caching (https://apple.stackexchange.com/a/428388) rm -f ./bin/consul cp ${MAIN_GOPATH}/bin/consul ./bin/consul -.PHONY: dev-docker-dbg -dev-docker-dbg: dev-docker ## Build containers for debug mode + +dev-docker-dbg: dev-docker @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @@ -187,8 +183,7 @@ dev-docker-dbg: dev-docker ## Build containers for debug mode --load \ -f $(CURDIR)/build-support/docker/Consul-Dev-Dbg.dockerfile $(CURDIR)/pkg/bin/ -.PHONY: dev-docker -dev-docker: linux dev-build ## Build and tag docker images in dev env +dev-docker: linux dev-build @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @@ -197,20 +192,15 @@ dev-docker: linux dev-build ## Build and tag docker images in dev env @docker buildx use default && docker buildx build -t 'consul:local' -t '$(CONSUL_DEV_IMAGE)' \ --platform linux/$(GOARCH) \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ - --label org.opencontainers.image.version=$(CONSUL_VERSION) \ - --label version=$(CONSUL_VERSION) \ --load \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ - docker tag 'consul:local' '$(CONSUL_COMPAT_TEST_IMAGE):local' -.PHONY: check-remote-dev-image-env -check-remote-dev-image-env: ## Check remote dev image env +check-remote-dev-image-env: ifndef REMOTE_DEV_IMAGE $(error REMOTE_DEV_IMAGE is undefined: set this image to /:, e.g. 
hashicorp/consul-k8s-dev:latest) endif -.PHONY: remote-docker -remote-docker: check-remote-dev-image-env ## Remote docker +remote-docker: check-remote-dev-image-env $(MAKE) GOARCH=amd64 linux $(MAKE) GOARCH=arm64 linux @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @@ -222,17 +212,51 @@ remote-docker: check-remote-dev-image-env ## Remote docker @docker buildx use consul-builder && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \ --platform linux/amd64,linux/arm64 \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ - --label org.opencontainers.image.version=$(CONSUL_VERSION) \ - --label version=$(CONSUL_VERSION) \ --push \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ -linux: ## Linux builds a linux binary compatible with the source platform +# In CI, the linux binary will be attached from a previous step at bin/. This make target +# should only run in CI and not locally. +ci.dev-docker: + @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" + @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null + @echo "Building Consul Development container - $(CI_DEV_DOCKER_IMAGE_NAME)" + @docker build $(NOCACHE) $(QUIET) -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)' \ + --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ + --label COMMIT_SHA=$(CIRCLE_SHA1) \ + --label PULL_REQUEST=$(CIRCLE_PULL_REQUEST) \ + --label CIRCLE_BUILD_URL=$(CIRCLE_BUILD_URL) \ + $(CI_DEV_DOCKER_WORKDIR) -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile + @echo $(DOCKER_PASS) | docker login -u="$(DOCKER_USER)" --password-stdin + @echo "Pushing dev image to: https://cloud.docker.com/u/hashicorpdev/repository/docker/hashicorpdev/consul" + @docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT) +ifeq ($(CIRCLE_BRANCH), main) + @docker tag $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT) $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest + @docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest +endif + +# linux builds a linux binary compatible with the source platform +linux: @mkdir -p ./pkg/bin/linux_$(GOARCH) CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)" +# dist builds binaries for all platforms and packages them for distribution +dist: + @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) $(DIST_REL_ARG) + +cover: cov +cov: other-consul dev-build + go test -tags '$(GOTAGS)' ./... -coverprofile=coverage.out + cd sdk && go test -tags '$(GOTAGS)' ./... -coverprofile=../coverage.sdk.part + cd api && go test -tags '$(GOTAGS)' ./... -coverprofile=../coverage.api.part + grep -h -v "mode: set" coverage.{sdk,api}.part >> coverage.out + rm -f coverage.{sdk,api}.part + go tool cover -html=coverage.out + +test: other-consul dev-build lint test-internal + .PHONY: go-mod-tidy -go-mod-tidy: $(foreach mod,$(GO_MODULES),go-mod-tidy/$(mod)) ## Run go mod tidy in every module +go-mod-tidy: $(foreach mod,$(GO_MODULES),go-mod-tidy/$(mod)) .PHONY: mod-tidy/% go-mod-tidy/%: @@ -252,69 +276,7 @@ endif @echo "--> Running go mod tidy ($*)" @cd $* && go mod tidy -##@ Checks - -.PHONY: fmt -fmt: $(foreach mod,$(GO_MODULES),fmt/$(mod)) ## Format go modules - -.PHONY: fmt/% -fmt/%: - @echo "--> Running go fmt ($*)" - @cd $* && gofmt -s -l -w . 
- -.PHONY: lint -lint: $(foreach mod,$(GO_MODULES),lint/$(mod)) lint-container-test-deps ## Lint go modules and test deps - -.PHONY: lint/% -lint/%: - @echo "--> Running golangci-lint ($*)" - @cd $* && GOWORK=off golangci-lint run --build-tags '$(GOTAGS)' - @echo "--> Running lint-consul-retry ($*)" - @cd $* && GOWORK=off lint-consul-retry - @echo "--> Running enumcover ($*)" - @cd $* && GOWORK=off enumcover ./... - -.PHONY: lint-consul-retry -lint-consul-retry: $(foreach mod,$(GO_MODULES),lint-consul-retry/$(mod)) - -.PHONY: lint-consul-retry/% -lint-consul-retry/%: lint-tools - @echo "--> Running lint-consul-retry ($*)" - @cd $* && GOWORK=off lint-consul-retry - - -# check that the test-container module only imports allowlisted packages -# from the root consul module. Generally we don't want to allow these imports. -# In a few specific instances though it is okay to import test definitions and -# helpers from some of the packages in the root module. -.PHONY: lint-container-test-deps -lint-container-test-deps: ## Check that the test-container module only imports allowlisted packages from the root consul module. - @echo "--> Checking container tests for bad dependencies" - @cd test/integration/consul-container && \ - $(CURDIR)/build-support/scripts/check-allowed-imports.sh \ - github.com/hashicorp/consul \ - "internal/catalog/catalogtest" \ - "internal/resource/resourcetest" - -##@ Testing - -.PHONY: cover -cover: cov ## Run tests and generate coverage report - -.PHONY: cov -cov: other-consul dev-build - go test -tags '$(GOTAGS)' ./... -coverprofile=coverage.out - cd sdk && go test -tags '$(GOTAGS)' ./... -coverprofile=../coverage.sdk.part - cd api && go test -tags '$(GOTAGS)' ./... -coverprofile=../coverage.api.part - grep -h -v "mode: set" coverage.{sdk,api}.part >> coverage.out - rm -f coverage.{sdk,api}.part - go tool cover -html=coverage.out - -.PHONY: test -test: other-consul dev-build lint test-internal - -.PHONY: test-internal -test-internal: ## Test internal +test-internal: @echo "--> Running go test" @rm -f test.log exit-code @# Dump verbose output to test.log so we can surface test names on failure but @@ -343,195 +305,113 @@ test-internal: ## Test internal @grep '^FAIL' test.log || true @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else exit 1 ; fi -.PHONY: test-all -test-all: other-consul dev-build lint $(foreach mod,$(GO_MODULES),test-module/$(mod)) ## Test all +test-all: other-consul dev-build lint $(foreach mod,$(GO_MODULES),test-module/$(mod)) -.PHONY: test-module/% test-module/%: @echo "--> Running go test ($*)" cd $* && go test $(GOTEST_FLAGS) -tags '$(GOTAGS)' ./... -.PHONY: test-race -test-race: ## Test race +test-race: $(MAKE) GOTEST_FLAGS=-race -.PHONY: other-consul -other-consul: ## Checking for other consul instances +test-docker: linux go-build-image + @# -ti run in the foreground showing stdout + @# --rm removes the container once its finished running + @# GO_MODCACHE_VOL - args for mapping in the go module cache + @# GO_BUILD_CACHE_VOL - args for mapping in the go build cache + @# All the env vars are so we pass through all the relevant bits of information + @# Needed for running the tests + @# We map in our local linux_amd64 bin directory as thats where the linux dep + @# target dropped the binary. 
We could build the binary in the container too + @# but that might take longer as caching gets weird + @# Lastly we map the source dir here to the /consul workdir + @echo "Running tests within a docker container" + @docker run -ti --rm \ + -e 'GOTEST_FLAGS=$(GOTEST_FLAGS)' \ + -e 'GOTAGS=$(GOTAGS)' \ + -e 'GIT_COMMIT=$(GIT_COMMIT)' \ + -e 'GIT_COMMIT_YEAR=$(GIT_COMMIT_YEAR)' \ + -e 'GIT_DIRTY=$(GIT_DIRTY)' \ + $(TEST_PARALLELIZATION) \ + $(TEST_DOCKER_RESOURCE_CONSTRAINTS) \ + $(TEST_MODCACHE_VOL) \ + $(TEST_BUILDCACHE_VOL) \ + -v $(MAIN_GOPATH)/bin/linux_amd64/:/go/bin \ + -v $(shell pwd):/consul \ + $(GO_BUILD_TAG) \ + make test-internal + +other-consul: @echo "--> Checking for other consul instances" @if ps -ef | grep 'consul agent' | grep -v grep ; then \ echo "Found other running consul agents. This may affect your tests." ; \ exit 1 ; \ fi -# Use GO_TEST_FLAGS to run specific tests: -# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" -# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. -# You can also specify the envoy version (example: 1.27.0) setting the environment variable: ENVOY_VERSION=1.27.0 -.PHONY: test-envoy-integ -test-envoy-integ: $(ENVOY_INTEG_DEPS) ## Run envoy integration tests. - @go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy - -# NOTE: Use DOCKER_BUILDKIT=0, if docker build fails to resolve consul:local base image -.PHONY: test-compat-integ-setup -test-compat-integ-setup: test-deployer-setup - @# 'consul-envoy:target-version' is needed by compatibility integ test - @docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=$(CONSUL_COMPAT_TEST_IMAGE):local --build-arg ENVOY_VERSION=${ENVOY_VERSION} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - @docker build -t consul-dataplane:local --build-arg CONSUL_IMAGE=$(CONSUL_COMPAT_TEST_IMAGE):local --build-arg CONSUL_DATAPLANE_IMAGE=${CONSUL_DATAPLANE_IMAGE} -f ./test/integration/consul-container/assets/Dockerfile-consul-dataplane ./test/integration/consul-container/assets - -# NOTE: Use DOCKER_BUILDKIT=0, if docker build fails to resolve consul:local base image -.PHONY: test-deployer-setup -test-deployer-setup: dev-docker - @docker tag consul-dev:latest $(CONSUL_COMPAT_TEST_IMAGE):local - @docker run --rm -t $(CONSUL_COMPAT_TEST_IMAGE):local consul version - -.PHONY: test-deployer -test-deployer: test-deployer-setup ## Run deployer-based integration tests (skipping peering_commontopo). - @cd ./test-integ && \ - NOLOGBUFFER=1 \ - TEST_LOG_LEVEL=debug \ - DEPLOYER_CONSUL_DATAPLANE_IMAGE=$(DEPLOYER_CONSUL_DATAPLANE_IMAGE) \ - gotestsum \ - --raw-command \ - --format=standard-verbose \ - --debug \ - -- \ - go test \ - -tags "$(GOTAGS)" \ - -timeout=20m \ - -json \ - $(shell sh -c "cd test-integ ; go list -tags \"$(GOTAGS)\" ./... | grep -v peering_commontopo") \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest - -.PHONY: test-deployer-peering -test-deployer-peering: test-deployer-setup ## Run deployer-based integration tests (just peering_commontopo). - @cd ./test-integ/peering_commontopo && \ - NOLOGBUFFER=1 \ - TEST_LOG_LEVEL=debug \ - DEPLOYER_CONSUL_DATAPLANE_IMAGE=$(DEPLOYER_CONSUL_DATAPLANE_IMAGE) \ - gotestsum \ - --raw-command \ - --format=standard-verbose \ - --debug \ - -- \ - go test \ - -tags "$(GOTAGS)" \ - -timeout=20m \ - -json \ - . 
\ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest - +.PHONY: fmt +fmt: $(foreach mod,$(GO_MODULES),fmt/$(mod)) -.PHONY: test-compat-integ -test-compat-integ: test-compat-integ-setup ## Run consul-container based integration tests. -ifeq ("$(GOTESTSUM_PATH)","") - @cd ./test/integration/consul-container && \ - go test \ - -v \ - -timeout=30m \ - ./... \ - --tags $(GOTAGS) \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest -else - @cd ./test/integration/consul-container && \ - gotestsum \ - --format=short-verbose \ - --debug \ - --rerun-fails=3 \ - --packages="./..." \ - -- \ - --tags $(GOTAGS) \ - -timeout=30m \ - ./... \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest -endif +.PHONY: fmt/% +fmt/%: + @echo "--> Running go fmt ($*)" + @cd $* && gofmt -s -l -w . -.PHONY: test-metrics-integ -test-metrics-integ: test-compat-integ-setup ## Test metrics integ - @cd ./test/integration/consul-container && \ - go test -v -timeout=7m ./test/metrics \ - --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --target-version local \ - --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ - --latest-version latest +.PHONY: lint +lint: $(foreach mod,$(GO_MODULES),lint/$(mod)) lint-container-test-deps -.PHONY: test-connect-ca-providers -test-connect-ca-providers: ## Running /agent/connect/ca tests in verbose mode - @echo "Running /agent/connect/ca tests in verbose mode" - @go test -v ./agent/connect/ca - @go test -v ./agent/consul -run Vault - @go test -v ./agent -run Vault +.PHONY: lint/% +lint/%: + @echo "--> Running golangci-lint ($*)" + @cd $* && GOWORK=off golangci-lint run --build-tags '$(GOTAGS)' + @echo "--> Running lint-consul-retry ($*)" + @cd $* && GOWORK=off lint-consul-retry + @echo "--> Running enumcover ($*)" + @cd $* && GOWORK=off enumcover ./... -##@ UI +.PHONY: lint-container-test-deps +lint-container-test-deps: + @echo "--> Checking container tests for bad dependencies" + @cd test/integration/consul-container && ( \ + found="$$(go list -m all | grep -c '^github.com/hashicorp/consul ')" ; \ + if [[ "$$found" != "0" ]]; then \ + echo "test/integration/consul-container: This project should not depend on the root consul module" >&2 ; \ + exit 1 ; \ + fi \ + ) -.PHONY: ui -ui: ui-docker ## Build the static web ui inside a Docker container. For local testing only; do not commit these assets. +# Build the static web ui inside a Docker container. For local testing only; do not commit these assets. +ui: ui-docker +# Build the static web ui with yarn. This is the version to commit. .PHONY: ui-regen -ui-regen: ## Build the static web ui with yarn. This is the version to commit. +ui-regen: cd $(CURDIR)/ui && make && cd .. 
rm -rf $(CURDIR)/agent/uiserver/dist mv $(CURDIR)/ui/packages/consul-ui/dist $(CURDIR)/agent/uiserver/ -.PHONY: ui-build-image -ui-build-image: ## Building UI build container - @echo "Building UI build container" - @docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile - -.PHONY: ui-docker -ui-docker: ui-build-image ## Builds ui within docker container and copy all the relevant artifacts out of the containers back to the source - @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui - -##@ Tools - -.PHONY: tools -tools: ## Installs various supporting Go tools. +tools: @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh .PHONY: lint-tools -lint-tools: ## Install tools for linting +lint-tools: @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -lint +.PHONY: proto-tools +proto-tools: + @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -protobuf + .PHONY: codegen-tools -codegen-tools: ## Install tools for codegen +codegen-tools: @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -codegen -.PHONY: codegen -codegen: codegen-tools ## Deep copy +.PHONY: deep-copy +deep-copy: codegen-tools @$(SHELL) $(CURDIR)/agent/structs/deep-copy.sh @$(SHELL) $(CURDIR)/agent/proxycfg/deep-copy.sh @$(SHELL) $(CURDIR)/agent/consul/state/deep-copy.sh @$(SHELL) $(CURDIR)/agent/config/deep-copy.sh - copywrite headers - # Special case for MPL headers in /api and /sdk - cd api && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh - cd sdk && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh - -print-% : ; @echo $($*) ## utility to echo a makefile variable (i.e. 'make print-GOPATH') -.PHONY: module-versions -module-versions: ## Print a list of modules which can be updated. Columns are: module current_version date_of_current_version latest_version - @go list -m -u -f '{{if .Update}} {{printf "%-50v %-40s" .Path .Version}} {{with .Time}} {{ .Format "2006-01-02" -}} {{else}} {{printf "%9s" ""}} {{end}} {{ .Update.Version}} {{end}}' all - -.PHONY: docs -docs: ## Point your web browser to http://localhost:3000/consul to live render docs from ./website/ - make -C website - -##@ Release - -.PHONY: version -version: ## Current Consul version +version: @echo -n "Version: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh @echo -n "Version + release: " @@ -541,20 +421,26 @@ version: ## Current Consul version @echo -n "Version + release + git: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g -.PHONY: docker-images + docker-images: go-build-image ui-build-image -.PHONY: go-build-image -go-build-image: ## Building Golang build container +go-build-image: @echo "Building Golang $(GOLANG_VERSION) build container" @docker build $(NOCACHE) $(QUIET) -t $(GO_BUILD_TAG) --build-arg GOLANG_VERSION=$(GOLANG_VERSION) - < build-support/docker/Build-Go.dockerfile -.PHONY: consul-docker -consul-docker: go-build-image ## Builds consul in a docker container and then dumps executable into ./pkg/bin/... +ui-build-image: + @echo "Building UI build container" + @docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile + +# Builds consul in a docker container and then dumps executable into ./pkg/bin/... +consul-docker: go-build-image @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh consul -.PHONY: docker-envoy-integ -docker-envoy-integ: ## Build image used to run integration tests locally. 
+ui-docker: ui-build-image + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui + +# Build image used to run integration tests locally. +docker-envoy-integ: $(MAKE) GOARCH=amd64 linux docker build \ --platform linux/amd64 $(NOCACHE) $(QUIET) \ @@ -563,30 +449,87 @@ docker-envoy-integ: ## Build image used to run integration tests locally. $(CURDIR)/pkg/bin/linux_amd64 \ -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile -##@ Proto +# Run integration tests. +# Use GO_TEST_FLAGS to run specific tests: +# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" +# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. +test-envoy-integ: $(ENVOY_INTEG_DEPS) + @go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy -.PHONY: proto -proto: proto-tools proto-gen proto-mocks ## Protobuf setup command +.PHONY: test-compat-integ +test-compat-integ: test-compat-integ-setup +ifeq ("$(GOTESTSUM_PATH)","") + @cd ./test/integration/consul-container && \ + go test \ + -v \ + -timeout=30m \ + ./... \ + --tags $(GOTAGS) \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest +else + @cd ./test/integration/consul-container && \ + gotestsum \ + --format=short-verbose \ + --debug \ + --rerun-fails=3 \ + --packages="./..." \ + -- \ + --tags $(GOTAGS) \ + -timeout=30m \ + ./... \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest +endif -.PHONY: proto-tools -proto-tools: ## Install tools for protobuf - @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -protobuf +# NOTE: Use DOCKER_BUILDKIT=0, if docker build fails to resolve consul:local base image +.PHONY: test-compat-integ-setup +test-compat-integ-setup: dev-docker + @docker tag consul-dev:latest $(CONSUL_COMPAT_TEST_IMAGE):local + @docker run --rm -t $(CONSUL_COMPAT_TEST_IMAGE):local consul version + @# 'consul-envoy:target-version' is needed by compatibility integ test + @docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=$(CONSUL_COMPAT_TEST_IMAGE):local --build-arg ENVOY_VERSION=${ENVOY_VERSION} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets + +.PHONY: test-metrics-integ +test-metrics-integ: test-compat-integ-setup + @cd ./test/integration/consul-container && \ + go test -v -timeout=7m ./test/metrics \ + --target-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --target-version local \ + --latest-image $(CONSUL_COMPAT_TEST_IMAGE) \ + --latest-version latest + +test-connect-ca-providers: + @echo "Running /agent/connect/ca tests in verbose mode" + @go test -v ./agent/connect/ca + @go test -v ./agent/consul -run Vault + @go test -v ./agent -run Vault + +.PHONY: proto +proto: proto-tools proto-gen proto-mocks .PHONY: proto-gen -proto-gen: proto-tools ## Regenerates all Go files from protobuf definitions +proto-gen: proto-tools @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh .PHONY: proto-mocks -proto-mocks: ## Proto mocks - @rm -rf grpcmocks/* - @mockery --config .grpcmocks.yaml +proto-mocks: + for dir in $(MOCKED_PB_DIRS) ; do \ + cd proto-public && \ + rm -f $$dir/mock*.go && \ + mockery --dir $$dir --inpackage --all --recursive --log-level trace ; \ + done .PHONY: proto-format -proto-format: proto-tools ## Proto format +proto-format: proto-tools @buf format -w .PHONY: proto-lint -proto-lint: 
proto-tools ## Proto lint +proto-lint: proto-tools @buf lint @for fn in $$(find proto -name '*.proto'); do \ if [[ "$$fn" = "proto/private/pbsubscribe/subscribe.proto" ]]; then \ @@ -601,14 +544,21 @@ proto-lint: proto-tools ## Proto lint fi \ done -##@ Envoy +# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION') +print-% : ; @echo $($*) + +.PHONY: module-versions +# Print a list of modules which can be updated. +# Columns are: module current_version date_of_current_version latest_version +module-versions: + @go list -m -u -f '{{if .Update}} {{printf "%-50v %-40s" .Path .Version}} {{with .Time}} {{ .Format "2006-01-02" -}} {{else}} {{printf "%9s" ""}} {{end}} {{ .Update.Version}} {{end}}' all .PHONY: envoy-library -envoy-library: ## Ensures that all of the protobuf packages present in the github.com/envoyproxy/go-control-plane library are referenced in the consul codebase +envoy-library: @$(SHELL) $(CURDIR)/build-support/scripts/envoy-library-references.sh .PHONY: envoy-regen -envoy-regen: ## Regenerating envoy golden files +envoy-regen: $(info regenerating envoy golden files) @for d in endpoints listeners routes clusters rbac; do \ if [[ -d "agent/xds/testdata/$${d}" ]]; then \ @@ -619,18 +569,25 @@ envoy-regen: ## Regenerating envoy golden files @find "command/connect/envoy/testdata" -name '*.golden' -delete @go test -tags '$(GOTAGS)' ./command/connect/envoy -update -##@ Help - -# The help target prints out all targets with their descriptions organized -# beneath their categories. The categories are represented by '##@' and the -# target descriptions by '##'. The awk commands is responsible for reading the -# entire set of makefiles included in this invocation, looking for lines of the -# file as xyz: ## something, and then pretty-format the target and help. Then, -# if there's a line with ##@ something, that gets pretty-printed as a category. -# More info on the usage of ANSI control characters for terminal formatting: -# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters -# More info on the awk command: -# http://linuxcommand.org/lc3_adv_awk.php +# Point your web browser to http://localhost:3000/consul to live render docs from ./website/ +.PHONY: docs +docs: + make -C website + .PHONY: help -help: ## Display this help. - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) +help: + $(info available make targets) + $(info ----------------------) + @grep "^[a-z0-9-][a-z0-9.-]*:" GNUmakefile | cut -d':' -f1 | sort + +.PHONY: all bin dev dist cov test test-internal cover lint ui tools +.PHONY: docker-images go-build-image ui-build-image consul-docker ui-docker +.PHONY: version test-envoy-integ + +.PHONY: copywrite-headers +copywrite-headers: + copywrite headers + # Special case for MPL headers in /api and /sdk + cd api && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh + cd sdk && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh + cd proto-public && $(CURDIR)/build-support/scripts/copywrite-exceptions.sh diff --git a/LICENSE b/LICENSE index e20f243153660..4101bc4b95119 100644 --- a/LICENSE +++ b/LICENSE @@ -4,7 +4,7 @@ License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. Parameters Licensor: HashiCorp, Inc. -Licensed Work: Consul Version 1.17.0 or later. The Licensed Work is (c) 2024 +Licensed Work: Consul Version 1.16.4 or later. 
The Licensed Work is (c) 2024 HashiCorp, Inc. Additional Use Grant: You may make production use of the Licensed Work, provided Your use does not include offering the Licensed Work to third diff --git a/NOTICE.md b/NOTICE.md new file mode 100644 index 0000000000000..fe34b5e571555 --- /dev/null +++ b/NOTICE.md @@ -0,0 +1,3 @@ +Copyright © 2014-2018 HashiCorp, Inc. + +This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this project, you can obtain one at http://mozilla.org/MPL/2.0/. diff --git a/README.md b/README.md index c23053f0e1f7d..93e3176cc048a 100644 --- a/README.md +++ b/README.md @@ -23,9 +23,6 @@ Consul provides several key features: can use sidecar proxies in a service mesh configuration to establish TLS connections for inbound and outbound connections with Transparent Proxy. -* **API Gateway** - Consul API Gateway manages access to services within Consul Service Mesh, - allow users to define traffic and authorization policies to services deployed within the mesh. - * **Service Discovery** - Consul makes it simple for services to register themselves and to discover other services via a DNS or HTTP interface. External services such as SaaS providers can be registered as well. @@ -35,8 +32,7 @@ Consul provides several key features: discovery prevents routing traffic to unhealthy hosts and enables service level circuit breakers. -* **Dynamic App Configuration** - An HTTP API that allows users to store indexed objects within Consul, - for storing configuration parameters and application metadata. +* **Dynamic App Configuration** - An HTTP API that allows users to store indexed objects, like configuration parameters and application metadata, within Consul. Consul runs on Linux, macOS, FreeBSD, Solaris, and Windows and includes an optional [browser based UI](https://demo.consul.io). A commercial version diff --git a/acl/MockAuthorizer.go b/acl/MockAuthorizer.go index e3a97ceec9bf6..01afb5fea6654 100644 --- a/acl/MockAuthorizer.go +++ b/acl/MockAuthorizer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -59,31 +59,6 @@ func (m *MockAuthorizer) EventWrite(segment string, ctx *AuthorizerContext) Enfo return ret.Get(0).(EnforcementDecision) } -// IdentityRead checks for permission to read a given workload identity. -func (m *MockAuthorizer) IdentityRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IdentityReadAll checks for permission to read all workload identities. -func (m *MockAuthorizer) IdentityReadAll(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IdentityWrite checks for permission to create or update a given -// workload identity. -func (m *MockAuthorizer) IdentityWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// IdentityWriteAny checks for write permission on any workload identity. -func (m *MockAuthorizer) IdentityWriteAny(ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(ctx) - return ret.Get(0).(EnforcementDecision) -} - // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. 
func (m *MockAuthorizer) IntentionDefaultAllow(ctx *AuthorizerContext) EnforcementDecision { @@ -224,11 +199,6 @@ func (m *MockAuthorizer) ServiceReadAll(ctx *AuthorizerContext) EnforcementDecis return ret.Get(0).(EnforcementDecision) } -func (m *MockAuthorizer) ServiceReadPrefix(prefix string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(prefix, ctx) - return ret.Get(0).(EnforcementDecision) -} - // ServiceWrite checks for permission to create or update a given // service func (m *MockAuthorizer) ServiceWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { @@ -261,19 +231,6 @@ func (m *MockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision { return ret.Get(0).(EnforcementDecision) } -// TrafficPermissionsRead determines if specific traffic permissions can be read. -func (m *MockAuthorizer) TrafficPermissionsRead(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - -// TrafficPermissionsWrite determines if specific traffic permissions can be -// created, modified, or deleted. -func (m *MockAuthorizer) TrafficPermissionsWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { - ret := m.Called(segment, ctx) - return ret.Get(0).(EnforcementDecision) -} - func (p *MockAuthorizer) ToAllowAuthorizer() AllowAuthorizer { return AllowAuthorizer{Authorizer: p} } diff --git a/acl/acl.go b/acl/acl.go index 753db01516e8c..75789dd17498c 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/acl/acl_ce.go b/acl/acl_ce.go index 7d2b8513b8327..58f92022ba55b 100644 --- a/acl/acl_ce.go +++ b/acl/acl_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/acl_test.go b/acl/acl_test.go index 28542024e9567..3734eb1572fd3 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -40,22 +40,6 @@ func checkAllowEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx require.Equal(t, Allow, authz.EventWrite(prefix, entCtx)) } -func checkAllowIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.IdentityRead(prefix, entCtx)) -} - -func checkAllowIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.IdentityReadAll(entCtx)) -} - -func checkAllowIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.IdentityWrite(prefix, entCtx)) -} - -func checkAllowIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.IdentityWriteAny(entCtx)) -} - func checkAllowIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Allow, authz.IntentionDefaultAllow(entCtx)) } @@ -164,14 +148,6 @@ func checkAllowSnapshot(t *testing.T, authz Authorizer, prefix string, entCtx *A require.Equal(t, Allow, authz.Snapshot(entCtx)) } -func checkAllowTrafficPermissionsRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.TrafficPermissionsRead(prefix, entCtx)) -} - -func checkAllowTrafficPermissionsWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.TrafficPermissionsWrite(prefix, entCtx)) -} - func checkDenyACLRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.ACLRead(entCtx)) } @@ -196,22 +172,6 @@ func checkDenyEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx * require.Equal(t, Deny, authz.EventWrite(prefix, entCtx)) } -func checkDenyIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.IdentityRead(prefix, entCtx)) -} - -func checkDenyIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.IdentityReadAll(entCtx)) -} - -func checkDenyIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.IdentityWrite(prefix, entCtx)) -} - -func checkDenyIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.IdentityWriteAny(entCtx)) -} - func checkDenyIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.IntentionDefaultAllow(entCtx)) } @@ -300,14 +260,6 @@ func checkDenyServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *A require.Equal(t, Deny, authz.ServiceReadAll(entCtx)) } -func checkAllowServiceReadPrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Allow, authz.ServiceReadPrefix(prefix, entCtx)) -} - -func checkDenyServiceReadPrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.ServiceReadPrefix(prefix, entCtx)) -} - func checkDenyServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.ServiceWrite(prefix, entCtx)) } @@ -328,14 +280,6 @@ func checkDenySnapshot(t *testing.T, authz Authorizer, prefix 
string, entCtx *Au require.Equal(t, Deny, authz.Snapshot(entCtx)) } -func checkDenyTrafficPermissionsRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.TrafficPermissionsRead(prefix, entCtx)) -} - -func checkDenyTrafficPermissionsWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Deny, authz.TrafficPermissionsWrite(prefix, entCtx)) -} - func checkDefaultACLRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.ACLRead(entCtx)) } @@ -360,22 +304,6 @@ func checkDefaultEventWrite(t *testing.T, authz Authorizer, prefix string, entCt require.Equal(t, Default, authz.EventWrite(prefix, entCtx)) } -func checkDefaultIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Default, authz.IdentityRead(prefix, entCtx)) -} - -func checkDefaultIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Default, authz.IdentityReadAll(entCtx)) -} - -func checkDefaultIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Default, authz.IdentityWrite(prefix, entCtx)) -} - -func checkDefaultIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { - require.Equal(t, Default, authz.IdentityWriteAny(entCtx)) -} - func checkDefaultIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.IntentionDefaultAllow(entCtx)) } @@ -464,10 +392,6 @@ func checkDefaultServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx require.Equal(t, Default, authz.ServiceReadAll(entCtx)) } -func checkDefaultServiceReadPrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { - require.Equal(t, Default, authz.ServiceReadPrefix(prefix, entCtx)) -} - func checkDefaultServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.ServiceWrite(prefix, entCtx)) } @@ -516,10 +440,6 @@ func TestACL(t *testing.T) { {name: "DenyIntentionDefaultAllow", check: checkDenyIntentionDefaultAllow}, {name: "DenyIntentionRead", check: checkDenyIntentionRead}, {name: "DenyIntentionWrite", check: checkDenyIntentionWrite}, - {name: "DenyIdentityRead", check: checkDenyIdentityRead}, - {name: "DenyIdentityReadAll", check: checkDenyIdentityReadAll}, - {name: "DenyIdentityWrite", check: checkDenyIdentityWrite}, - {name: "DenyIdentityWriteAny", check: checkDenyIdentityWriteAny}, {name: "DenyKeyRead", check: checkDenyKeyRead}, {name: "DenyKeyringRead", check: checkDenyKeyringRead}, {name: "DenyKeyringWrite", check: checkDenyKeyringWrite}, @@ -538,7 +458,6 @@ func TestACL(t *testing.T) { {name: "DenyServiceRead", check: checkDenyServiceRead}, {name: "DenyServiceReadAll", check: checkDenyServiceReadAll}, {name: "DenyServiceWrite", check: checkDenyServiceWrite}, - {name: "DenyServiceWriteAny", check: checkDenyServiceWriteAny}, {name: "DenySessionRead", check: checkDenySessionRead}, {name: "DenySessionWrite", check: checkDenySessionWrite}, {name: "DenySnapshot", check: checkDenySnapshot}, @@ -554,10 +473,6 @@ func TestACL(t *testing.T) { {name: "AllowAgentWrite", check: checkAllowAgentWrite}, {name: "AllowEventRead", check: checkAllowEventRead}, {name: "AllowEventWrite", check: checkAllowEventWrite}, - {name: "AllowIdentityRead", check: checkAllowIdentityRead}, 
- {name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll}, - {name: "AllowIdentityWrite", check: checkAllowIdentityWrite}, - {name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny}, {name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow}, {name: "AllowIntentionRead", check: checkAllowIntentionRead}, {name: "AllowIntentionWrite", check: checkAllowIntentionWrite}, @@ -579,12 +494,9 @@ func TestACL(t *testing.T) { {name: "AllowServiceRead", check: checkAllowServiceRead}, {name: "AllowServiceReadAll", check: checkAllowServiceReadAll}, {name: "AllowServiceWrite", check: checkAllowServiceWrite}, - {name: "AllowServiceWriteAny", check: checkAllowServiceWriteAny}, {name: "AllowSessionRead", check: checkAllowSessionRead}, {name: "AllowSessionWrite", check: checkAllowSessionWrite}, {name: "DenySnapshot", check: checkDenySnapshot}, - {name: "AllowTrafficPermissionsRead", check: checkAllowTrafficPermissionsRead}, - {name: "AllowTrafficPermissionsWrite", check: checkAllowTrafficPermissionsWrite}, }, }, { @@ -597,10 +509,6 @@ func TestACL(t *testing.T) { {name: "AllowAgentWrite", check: checkAllowAgentWrite}, {name: "AllowEventRead", check: checkAllowEventRead}, {name: "AllowEventWrite", check: checkAllowEventWrite}, - {name: "AllowIdentityRead", check: checkAllowIdentityRead}, - {name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll}, - {name: "AllowIdentityWrite", check: checkAllowIdentityWrite}, - {name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny}, {name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow}, {name: "AllowIntentionRead", check: checkAllowIntentionRead}, {name: "AllowIntentionWrite", check: checkAllowIntentionWrite}, @@ -622,12 +530,9 @@ func TestACL(t *testing.T) { {name: "AllowServiceRead", check: checkAllowServiceRead}, {name: "AllowServiceReadAll", check: checkAllowServiceReadAll}, {name: "AllowServiceWrite", check: checkAllowServiceWrite}, - {name: "AllowServiceWriteAny", check: checkAllowServiceWriteAny}, {name: "AllowSessionRead", check: checkAllowSessionRead}, {name: "AllowSessionWrite", check: checkAllowSessionWrite}, {name: "AllowSnapshot", check: checkAllowSnapshot}, - {name: "AllowTrafficPermissionsRead", check: checkAllowTrafficPermissionsRead}, - {name: "AllowTrafficPermissionsWrite", check: checkAllowTrafficPermissionsWrite}, }, }, { @@ -1000,134 +905,6 @@ func TestACL(t *testing.T) { {name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowAgentWrite}, }, }, - { - name: "IdentityDefaultAllowPolicyDeny", - defaultPolicy: AllowAll(), - policyStack: []*Policy{ - { - PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyDeny, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "prefix", - Policy: PolicyDeny, - }, - }, - }, - }, - }, - checks: []aclCheck{ - {name: "IdentityFooReadDenied", prefix: "foo", check: checkDenyIdentityRead}, - {name: "IdentityFooWriteDenied", prefix: "foo", check: checkDenyIdentityWrite}, - {name: "IdentityPrefixReadDenied", prefix: "prefix", check: checkDenyIdentityRead}, - {name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, - {name: "IdentityBarReadAllowed", prefix: "fail", check: checkAllowIdentityRead}, - {name: "IdentityBarWriteAllowed", prefix: "fail", check: checkAllowIdentityWrite}, - }, - }, - { - name: "IdentityDefaultDenyPolicyAllow", - defaultPolicy: DenyAll(), - policyStack: []*Policy{ - { - PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: 
"foo", - Policy: PolicyWrite, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "prefix", - Policy: PolicyRead, - }, - }, - }, - }, - }, - checks: []aclCheck{ - {name: "IdentityFooReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, - {name: "IdentityFooWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, - {name: "IdentityPrefixReadAllowed", prefix: "prefix", check: checkAllowIdentityRead}, - {name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, - {name: "IdentityBarReadDenied", prefix: "fail", check: checkDenyIdentityRead}, - {name: "IdentityBarWriteDenied", prefix: "fail", check: checkDenyIdentityWrite}, - }, - }, - { - name: "IdentityDefaultDenyPolicyComplex", - defaultPolicy: DenyAll(), - policyStack: []*Policy{ - { - PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "football", - Policy: PolicyRead, - }, - { - Name: "prefix-forbidden", - Policy: PolicyDeny, - Intentions: PolicyDeny, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "prefix", - Policy: PolicyRead, - Intentions: PolicyWrite, - }, - }, - }, - }, - { - PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foozball", - Policy: PolicyWrite, - Intentions: PolicyRead, - }, - }, - }, - }, - }, - checks: []aclCheck{ - {name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, - {name: "IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, - {name: "TrafficPermissionsReadAllowed", prefix: "foo", check: checkAllowTrafficPermissionsRead}, - {name: "TrafficPermissionsWriteAllowed", prefix: "foo", check: checkAllowTrafficPermissionsWrite}, - {name: "IdentityReadAllowed", prefix: "football", check: checkAllowIdentityRead}, - {name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite}, - {name: "TrafficPermissionsReadAllowed", prefix: "football", check: checkAllowTrafficPermissionsRead}, - // This might be surprising but omitting intention rule gives at most intention:read - // if we have identity:write perms. This matches services as well. 
- {name: "TrafficPermissionsWriteDenied", prefix: "football", check: checkDenyTrafficPermissionsWrite}, - {name: "IdentityReadAllowed", prefix: "prefix", check: checkAllowIdentityRead}, - {name: "IdentityWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, - {name: "TrafficPermissionsReadAllowed", prefix: "prefix", check: checkAllowTrafficPermissionsRead}, - {name: "TrafficPermissionsWriteDenied", prefix: "prefix", check: checkAllowTrafficPermissionsWrite}, - {name: "IdentityReadDenied", prefix: "prefix-forbidden", check: checkDenyIdentityRead}, - {name: "IdentityWriteDenied", prefix: "prefix-forbidden", check: checkDenyIdentityWrite}, - {name: "TrafficPermissionsReadDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsRead}, - {name: "TrafficPermissionsWriteDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsWrite}, - {name: "IdentityReadAllowed", prefix: "foozball", check: checkAllowIdentityRead}, - {name: "IdentityWriteAllowed", prefix: "foozball", check: checkAllowIdentityWrite}, - {name: "TrafficPermissionsReadAllowed", prefix: "foozball", check: checkAllowTrafficPermissionsRead}, - {name: "TrafficPermissionsWriteDenied", prefix: "foozball", check: checkDenyTrafficPermissionsWrite}, - }, - }, { name: "KeyringDefaultAllowPolicyDeny", defaultPolicy: AllowAll(), diff --git a/acl/authorizer.go b/acl/authorizer.go index 39bac5f7b08b8..f4515f11c92ac 100644 --- a/acl/authorizer.go +++ b/acl/authorizer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -43,7 +43,6 @@ const ( ResourceACL Resource = "acl" ResourceAgent Resource = "agent" ResourceEvent Resource = "event" - ResourceIdentity Resource = "identity" ResourceIntention Resource = "intention" ResourceKey Resource = "key" ResourceKeyring Resource = "keyring" @@ -78,25 +77,8 @@ type Authorizer interface { // EventWrite determines if a specific event may be fired. EventWrite(string, *AuthorizerContext) EnforcementDecision - // IdentityRead checks for permission to read a given workload identity. - IdentityRead(string, *AuthorizerContext) EnforcementDecision - - // IdentityReadAll checks for permission to read all workload identities. - IdentityReadAll(*AuthorizerContext) EnforcementDecision - - // IdentityWrite checks for permission to create or update a given - // workload identity. - IdentityWrite(string, *AuthorizerContext) EnforcementDecision - - // IdentityWriteAny checks for write permission on any workload identity. - IdentityWriteAny(*AuthorizerContext) EnforcementDecision - // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. - // - // Deprecated: Use DefaultIntentionPolicy under agent configuration. - // Moving forwards, intentions will not inherit default allow behavior - // from the ACL system. IntentionDefaultAllow(*AuthorizerContext) EnforcementDecision // IntentionRead determines if a specific intention can be read. @@ -175,9 +157,6 @@ type Authorizer interface { // ServiceReadAll checks for permission to read all services ServiceReadAll(*AuthorizerContext) EnforcementDecision - // ServiceReadPrefix checks for permission to read services within the given prefix. 
- ServiceReadPrefix(string, *AuthorizerContext) EnforcementDecision - // ServiceWrite checks for permission to create or update a given // service ServiceWrite(string, *AuthorizerContext) EnforcementDecision @@ -195,13 +174,6 @@ type Authorizer interface { // Snapshot checks for permission to take and restore snapshots. Snapshot(*AuthorizerContext) EnforcementDecision - // TrafficPermissionsRead determines if specific traffic permissions can be read. - TrafficPermissionsRead(string, *AuthorizerContext) EnforcementDecision - - // TrafficPermissionsWrite determines if specific traffic permissions can be - // created, modified, or deleted. - TrafficPermissionsWrite(string, *AuthorizerContext) EnforcementDecision - // Embedded Interface for Consul Enterprise specific ACL enforcement enterpriseAuthorizer @@ -267,36 +239,13 @@ func (a AllowAuthorizer) EventWriteAllowed(name string, ctx *AuthorizerContext) return nil } -// IdentityReadAllowed checks for permission to read a given workload identity, -func (a AllowAuthorizer) IdentityReadAllowed(name string, ctx *AuthorizerContext) error { - if a.Authorizer.IdentityRead(name, ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, name) - } - return nil -} - -// IdentityReadAllAllowed checks for permission to read all workload identities. -func (a AllowAuthorizer) IdentityReadAllAllowed(ctx *AuthorizerContext) error { - if a.Authorizer.IdentityReadAll(ctx) != Allow { - // This is only used to gate certain UI functions right now (e.g metrics) - return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, "all identities") // read - } - return nil -} - -// IdentityWriteAllowed checks for permission to create or update a given -// workload identity. -func (a AllowAuthorizer) IdentityWriteAllowed(name string, ctx *AuthorizerContext) error { - if a.Authorizer.IdentityWrite(name, ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, name) - } - return nil -} - -// IdentityWriteAnyAllowed checks for write permission on any workload identity -func (a AllowAuthorizer) IdentityWriteAnyAllowed(ctx *AuthorizerContext) error { - if a.Authorizer.IdentityWriteAny(ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, "any identity") +// IntentionDefaultAllowAllowed determines the default authorized behavior +// when no intentions match a Connect request. +func (a AllowAuthorizer) IntentionDefaultAllowAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.IntentionDefaultAllow(ctx) != Allow { + // This is a bit nuanced, in that this isn't set by a rule, but inherited globally + // TODO(acl-error-enhancements) revisit when we have full accessor info + return PermissionDeniedError{Cause: "Denied by intention default"} } return nil } @@ -318,23 +267,6 @@ func (a AllowAuthorizer) IntentionWriteAllowed(name string, ctx *AuthorizerConte return nil } -// TrafficPermissionsReadAllowed determines if specific traffic permissions can be read. -func (a AllowAuthorizer) TrafficPermissionsReadAllowed(name string, ctx *AuthorizerContext) error { - if a.Authorizer.TrafficPermissionsRead(name, ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceIntention, AccessRead, name) - } - return nil -} - -// TrafficPermissionsWriteAllowed determines if specific traffic permissions can be -// created, modified, or deleted. 
-func (a AllowAuthorizer) TrafficPermissionsWriteAllowed(name string, ctx *AuthorizerContext) error { - if a.Authorizer.TrafficPermissionsWrite(name, ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceIntention, AccessWrite, name) - } - return nil -} - // KeyListAllowed checks for permission to list keys under a prefix func (a AllowAuthorizer) KeyListAllowed(name string, ctx *AuthorizerContext) error { if a.Authorizer.KeyList(name, ctx) != Allow { @@ -503,14 +435,6 @@ func (a AllowAuthorizer) ServiceReadAllAllowed(ctx *AuthorizerContext) error { return nil } -// ServiceReadPrefixAllowed checks for permission to read services within the given prefix -func (a AllowAuthorizer) ServiceReadPrefixAllowed(prefix string, ctx *AuthorizerContext) error { - if a.Authorizer.ServiceReadPrefix(prefix, ctx) != Allow { - return PermissionDeniedByACL(a, ctx, ResourceService, AccessRead, prefix) // read - } - return nil -} - // ServiceWriteAllowed checks for permission to create or update a given // service func (a AllowAuthorizer) ServiceWriteAllowed(name string, ctx *AuthorizerContext) error { @@ -579,13 +503,6 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx case "write": return authz.EventWrite(segment, ctx), nil } - case ResourceIdentity: - switch lowerAccess { - case "read": - return authz.IdentityRead(segment, ctx), nil - case "write": - return authz.IdentityWrite(segment, ctx), nil - } case ResourceIntention: switch lowerAccess { case "read": diff --git a/acl/authorizer_ce.go b/acl/authorizer_ce.go index dafac8692a9a3..ed77d5e81d3f7 100644 --- a/acl/authorizer_ce.go +++ b/acl/authorizer_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/authorizer_test.go b/acl/authorizer_test.go index d538a04ad7152..20774841ba8dc 100644 --- a/acl/authorizer_test.go +++ b/acl/authorizer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -188,34 +188,6 @@ func TestACL_Enforce(t *testing.T) { ret: Deny, err: "Invalid access level", }, - { - method: "IdentityRead", - resource: ResourceIdentity, - segment: "foo", - access: "read", - ret: Deny, - }, - { - method: "IdentityRead", - resource: ResourceIdentity, - segment: "foo", - access: "read", - ret: Allow, - }, - { - method: "IdentityWrite", - resource: ResourceIdentity, - segment: "foo", - access: "write", - ret: Deny, - }, - { - method: "IdentityWrite", - resource: ResourceIdentity, - segment: "foo", - access: "write", - ret: Allow, - }, { method: "IntentionRead", resource: ResourceIntention, diff --git a/acl/chained_authorizer.go b/acl/chained_authorizer.go index 26f0c2dfe7fde..9a681187bc1ed 100644 --- a/acl/chained_authorizer.go +++ b/acl/chained_authorizer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -80,40 +80,10 @@ func (c *ChainedAuthorizer) EventWrite(name string, entCtx *AuthorizerContext) E }) } -// IdentityRead checks for permission to read a given workload identity. -func (c *ChainedAuthorizer) IdentityRead(name string, entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.IdentityRead(name, entCtx) - }) -} - -// IdentityReadAll checks for permission to read all workload identities. 
-func (c *ChainedAuthorizer) IdentityReadAll(entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.IdentityReadAll(entCtx) - }) -} - -// IdentityWrite checks for permission to create or update a given -// workload identity. -func (c *ChainedAuthorizer) IdentityWrite(name string, entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.IdentityWrite(name, entCtx) - }) -} - -// IdentityWriteAny checks for write permission on any workload identity. -func (c *ChainedAuthorizer) IdentityWriteAny(entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.IdentityWriteAny(entCtx) - }) -} - // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. func (c *ChainedAuthorizer) IntentionDefaultAllow(entCtx *AuthorizerContext) EnforcementDecision { return c.executeChain(func(authz Authorizer) EnforcementDecision { - //nolint:staticcheck return authz.IntentionDefaultAllow(entCtx) }) } @@ -276,12 +246,6 @@ func (c *ChainedAuthorizer) ServiceReadAll(entCtx *AuthorizerContext) Enforcemen }) } -func (c *ChainedAuthorizer) ServiceReadPrefix(prefix string, entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.ServiceReadPrefix(prefix, entCtx) - }) -} - // ServiceWrite checks for permission to create or update a given // service func (c *ChainedAuthorizer) ServiceWrite(name string, entCtx *AuthorizerContext) EnforcementDecision { @@ -319,21 +283,6 @@ func (c *ChainedAuthorizer) Snapshot(entCtx *AuthorizerContext) EnforcementDecis }) } -// TrafficPermissionsRead determines if specific traffic permissions can be read. -func (c *ChainedAuthorizer) TrafficPermissionsRead(prefix string, entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.TrafficPermissionsRead(prefix, entCtx) - }) -} - -// TrafficPermissionsWrite determines if specific traffic permissions can be -// created, modified, or deleted. -func (c *ChainedAuthorizer) TrafficPermissionsWrite(prefix string, entCtx *AuthorizerContext) EnforcementDecision { - return c.executeChain(func(authz Authorizer) EnforcementDecision { - return authz.TrafficPermissionsWrite(prefix, entCtx) - }) -} - func (c *ChainedAuthorizer) ToAllowAuthorizer() AllowAuthorizer { return AllowAuthorizer{Authorizer: c} } diff --git a/acl/chained_authorizer_test.go b/acl/chained_authorizer_test.go index 01d33a029204e..c17cbc907bf50 100644 --- a/acl/chained_authorizer_test.go +++ b/acl/chained_authorizer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -29,18 +29,6 @@ func (authz testAuthorizer) EventRead(string, *AuthorizerContext) EnforcementDec func (authz testAuthorizer) EventWrite(string, *AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } -func (authz testAuthorizer) IdentityRead(string, *AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} -func (authz testAuthorizer) IdentityReadAll(*AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} -func (authz testAuthorizer) IdentityWrite(string, *AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} -func (authz testAuthorizer) IdentityWriteAny(*AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} func (authz testAuthorizer) IntentionDefaultAllow(*AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } @@ -107,9 +95,6 @@ func (authz testAuthorizer) ServiceRead(string, *AuthorizerContext) EnforcementD func (authz testAuthorizer) ServiceReadAll(*AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } -func (authz testAuthorizer) ServiceReadPrefix(string, *AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} func (authz testAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } @@ -125,12 +110,6 @@ func (authz testAuthorizer) SessionWrite(string, *AuthorizerContext) Enforcement func (authz testAuthorizer) Snapshot(*AuthorizerContext) EnforcementDecision { return EnforcementDecision(authz) } -func (authz testAuthorizer) TrafficPermissionsRead(string, *AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} -func (authz testAuthorizer) TrafficPermissionsWrite(string, *AuthorizerContext) EnforcementDecision { - return EnforcementDecision(authz) -} func (authz testAuthorizer) ToAllowAuthorizer() AllowAuthorizer { return AllowAuthorizer{Authorizer: &authz} diff --git a/acl/enterprisemeta_ce.go b/acl/enterprisemeta_ce.go index 7262e79b6dc6f..8b93fd6807968 100644 --- a/acl/enterprisemeta_ce.go +++ b/acl/enterprisemeta_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/errors.go b/acl/errors.go index 7f4548ed95d62..7302e0392f178 100644 --- a/acl/errors.go +++ b/acl/errors.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/acl/errors_ce.go b/acl/errors_ce.go index e342137b534e0..8c2e84ac5c415 100644 --- a/acl/errors_ce.go +++ b/acl/errors_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/errors_test.go b/acl/errors_test.go index b4e645c073122..4988f695994f6 100644 --- a/acl/errors_test.go +++ b/acl/errors_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/acl/policy.go b/acl/policy.go index 0c88a9041b289..e26c8871314c3 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -59,8 +59,6 @@ type PolicyRules struct { ACL string `hcl:"acl,expand"` Agents []*AgentRule `hcl:"agent,expand"` AgentPrefixes []*AgentRule `hcl:"agent_prefix,expand"` - Identities []*IdentityRule `hcl:"identity,expand"` - IdentityPrefixes []*IdentityRule `hcl:"identity_prefix,expand"` Keys []*KeyRule `hcl:"key,expand"` KeyPrefixes []*KeyRule `hcl:"key_prefix,expand"` Nodes []*NodeRule `hcl:"node,expand"` @@ -92,19 +90,6 @@ type AgentRule struct { Policy string } -// IdentityRule represents a policy for a workload identity -type IdentityRule struct { - Name string `hcl:",key"` - Policy string - - // Intentions is the policy for intentions where this workload identity - // is the destination. This may be empty, in which case the Policy determines - // the intentions policy. - Intentions string - - EnterpriseRule `hcl:",squash"` -} - // KeyRule represents a rule for a key type KeyRule struct { Prefix string `hcl:",key"` @@ -183,30 +168,6 @@ func (pr *PolicyRules) Validate(conf *Config) error { } } - // Validate the identity policies - for _, id := range pr.Identities { - if !isPolicyValid(id.Policy, false) { - return fmt.Errorf("Invalid identity policy: %#v", id) - } - if id.Intentions != "" && !isPolicyValid(id.Intentions, false) { - return fmt.Errorf("Invalid identity intentions policy: %#v", id) - } - if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil { - return fmt.Errorf("Invalid identity enterprise policy: %#v, got error: %v", id, err) - } - } - for _, id := range pr.IdentityPrefixes { - if !isPolicyValid(id.Policy, false) { - return fmt.Errorf("Invalid identity_prefix policy: %#v", id) - } - if id.Intentions != "" && !isPolicyValid(id.Intentions, false) { - return fmt.Errorf("Invalid identity_prefix intentions policy: %#v", id) - } - if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil { - return fmt.Errorf("Invalid identity_prefix enterprise policy: %#v, got error: %v", id, err) - } - } - // Validate the key policy for _, kp := range pr.Keys { if !isPolicyValid(kp.Policy, true) { diff --git a/acl/policy_authorizer.go b/acl/policy_authorizer.go index 11d19609efde9..e87635a036df1 100644 --- a/acl/policy_authorizer.go +++ b/acl/policy_authorizer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -14,15 +14,9 @@ type policyAuthorizer struct { // agentRules contain the exact-match agent policies agentRules *radix.Tree - // identityRules contains the identity exact-match policies - identityRules *radix.Tree - // intentionRules contains the service intention exact-match policies intentionRules *radix.Tree - // trafficPermissionsRules contains the service intention exact-match policies - trafficPermissionsRules *radix.Tree - // keyRules contains the key exact-match policies keyRules *radix.Tree @@ -186,48 +180,6 @@ func (p *policyAuthorizer) loadRules(policy *PolicyRules) error { } } - // Load the identity policy (exact matches) - for _, id := range policy.Identities { - if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, false); err != nil { - return err - } - - intention := id.Intentions - if intention == "" { - switch id.Policy { - case PolicyRead, PolicyWrite: - intention = PolicyRead - default: - intention = PolicyDeny - } - } - - if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, false); err != nil { - return err - } - } - - // Load the identity policy (prefix matches) - for _, id := range policy.IdentityPrefixes { - if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, true); err != nil { - return err - } - - intention := id.Intentions - if intention == "" { - switch id.Policy { - case PolicyRead, PolicyWrite: - intention = PolicyRead - default: - intention = PolicyDeny - } - } - - if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, true); err != nil { - return err - } - } - // Load the key policy (exact matches) for _, kp := range policy.Keys { if err := insertPolicyIntoRadix(kp.Prefix, kp.Policy, &kp.EnterpriseRule, p.keyRules, false); err != nil { @@ -396,16 +348,14 @@ func newPolicyAuthorizer(policies []*Policy, ent *Config) (*policyAuthorizer, er func newPolicyAuthorizerFromRules(rules *PolicyRules, ent *Config) (*policyAuthorizer, error) { p := &policyAuthorizer{ - agentRules: radix.New(), - identityRules: radix.New(), - intentionRules: radix.New(), - trafficPermissionsRules: radix.New(), - keyRules: radix.New(), - nodeRules: radix.New(), - serviceRules: radix.New(), - sessionRules: radix.New(), - eventRules: radix.New(), - preparedQueryRules: radix.New(), + agentRules: radix.New(), + intentionRules: radix.New(), + keyRules: radix.New(), + nodeRules: radix.New(), + serviceRules: radix.New(), + sessionRules: radix.New(), + eventRules: radix.New(), + preparedQueryRules: radix.New(), } p.enterprisePolicyAuthorizer.init(ent) @@ -578,33 +528,6 @@ func (p *policyAuthorizer) EventWrite(name string, _ *AuthorizerContext) Enforce return Default } -// IdentityRead checks for permission to read a given workload identity. -func (p *policyAuthorizer) IdentityRead(name string, _ *AuthorizerContext) EnforcementDecision { - if rule, ok := getPolicy(name, p.identityRules); ok { - return enforce(rule.access, AccessRead) - } - return Default -} - -// IdentityReadAll checks for permission to read all workload identities. -func (p *policyAuthorizer) IdentityReadAll(_ *AuthorizerContext) EnforcementDecision { - return p.allAllowed(p.identityRules, AccessRead) -} - -// IdentityWrite checks for permission to create or update a given -// workload identity. 
-func (p *policyAuthorizer) IdentityWrite(name string, _ *AuthorizerContext) EnforcementDecision { - if rule, ok := getPolicy(name, p.identityRules); ok { - return enforce(rule.access, AccessWrite) - } - return Default -} - -// IdentityWriteAny checks for write permission on any workload identity. -func (p *policyAuthorizer) IdentityWriteAny(_ *AuthorizerContext) EnforcementDecision { - return p.anyAllowed(p.identityRules, AccessWrite) -} - // IntentionDefaultAllow returns whether the default behavior when there are // no matching intentions is to allow or deny. func (p *policyAuthorizer) IntentionDefaultAllow(_ *AuthorizerContext) EnforcementDecision { @@ -612,7 +535,8 @@ func (p *policyAuthorizer) IntentionDefaultAllow(_ *AuthorizerContext) Enforceme return Default } -// IntentionRead checks if reading an intention is allowed. +// IntentionRead checks if writing (creating, updating, or deleting) of an +// intention is allowed. func (p *policyAuthorizer) IntentionRead(prefix string, _ *AuthorizerContext) EnforcementDecision { if prefix == "*" { return p.anyAllowed(p.intentionRules, AccessRead) @@ -637,31 +561,6 @@ func (p *policyAuthorizer) IntentionWrite(prefix string, _ *AuthorizerContext) E return Default } -// TrafficPermissionsRead checks if reading of traffic permissions is allowed. -func (p *policyAuthorizer) TrafficPermissionsRead(prefix string, _ *AuthorizerContext) EnforcementDecision { - if prefix == "*" { - return p.anyAllowed(p.trafficPermissionsRules, AccessRead) - } - - if rule, ok := getPolicy(prefix, p.trafficPermissionsRules); ok { - return enforce(rule.access, AccessRead) - } - return Default -} - -// TrafficPermissionsWrite checks if writing (creating, updating, or deleting) of traffic -// permissions is allowed. -func (p *policyAuthorizer) TrafficPermissionsWrite(prefix string, _ *AuthorizerContext) EnforcementDecision { - if prefix == "*" { - return p.allAllowed(p.trafficPermissionsRules, AccessWrite) - } - - if rule, ok := getPolicy(prefix, p.trafficPermissionsRules); ok { - return enforce(rule.access, AccessWrite) - } - return Default -} - // KeyRead returns if a key is allowed to be read func (p *policyAuthorizer) KeyRead(key string, _ *AuthorizerContext) EnforcementDecision { if rule, ok := getPolicy(key, p.keyRules); ok { @@ -712,7 +611,7 @@ func (p *policyAuthorizer) KeyWritePrefix(prefix string, _ *AuthorizerContext) E // that do NOT grant AccessWrite. // // Conditions for Default: - // * There is no prefix match rule that would apply to the given prefix. + // * There is no prefix match rule that would appy to the given prefix. // AND // * There are no rules (exact or prefix match) within/under the given prefix // that would NOT grant AccessWrite. @@ -916,62 +815,6 @@ func (p *policyAuthorizer) ServiceReadAll(_ *AuthorizerContext) EnforcementDecis return p.allAllowed(p.serviceRules, AccessRead) } -// ServiceReadPrefix determines whether service read is allowed within the given prefix. -// -// Access is allowed iff all the following are true: -// - There's a read policy for the longest prefix that's shorter or equal to the provided prefix. -// - There's no deny policy for any prefix that's longer than the given prefix. -// - There's no deny policy for any exact match that's within the given prefix. -func (p *policyAuthorizer) ServiceReadPrefix(prefix string, _ *AuthorizerContext) EnforcementDecision { - access := Default - - // 1. Walk the prefix tree from root to the given prefix. 
Find the longest prefix matching ours, - // and use that policy to determine our access as that is the most specific prefix, and it - // should take precedence. - p.serviceRules.WalkPath(prefix, func(path string, leaf interface{}) bool { - rule := leaf.(*policyAuthorizerRadixLeaf) - - if rule.prefix != nil { - switch rule.prefix.access { - case AccessRead, AccessWrite: - access = Allow - default: - access = Deny - } - } - - // Don't stop iteration because we want to visit all nodes down to our leaf to find the more specific match - // as it should take precedence. - return false - }) - - // 2. Check rules "below" the given prefix. Access is allowed if there's no deny policy - // for any prefix longer than ours or for any exact match that's within the prefix. - p.serviceRules.WalkPrefix(prefix, func(path string, leaf interface{}) bool { - rule := leaf.(*policyAuthorizerRadixLeaf) - - if rule.prefix != nil && (rule.prefix.access != AccessRead && rule.prefix.access != AccessWrite) { - // If any prefix longer than the provided prefix has "deny" policy, then access is denied. - access = Deny - - // We don't need to look at the rest of the tree in this case, so terminate early. - return true - } - - if rule.exact != nil && (rule.exact.access != AccessRead && rule.exact.access != AccessWrite) { - // If any exact match policy has an explicit deny, then access is denied. - access = Deny - - // We don't need to look at the rest of the tree in this case, so terminate early. - return true - } - - return false - }) - - return access -} - // ServiceWrite checks if writing (registering) a service is allowed func (p *policyAuthorizer) ServiceWrite(name string, _ *AuthorizerContext) EnforcementDecision { if rule, ok := getPolicy(name, p.serviceRules); ok { diff --git a/acl/policy_authorizer_ce.go b/acl/policy_authorizer_ce.go index 34f8f1bf947dd..89708a5be9c31 100644 --- a/acl/policy_authorizer_ce.go +++ b/acl/policy_authorizer_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/policy_authorizer_test.go b/acl/policy_authorizer_test.go index 96272d8b12f4b..1c6959527899d 100644 --- a/acl/policy_authorizer_test.go +++ b/acl/policy_authorizer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -41,9 +41,6 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "DefaultAgentWrite", prefix: "foo", check: checkDefaultAgentWrite}, {name: "DefaultEventRead", prefix: "foo", check: checkDefaultEventRead}, {name: "DefaultEventWrite", prefix: "foo", check: checkDefaultEventWrite}, - {name: "DefaultIdentityRead", prefix: "foo", check: checkDefaultIdentityRead}, - {name: "DefaultIdentityWrite", prefix: "foo", check: checkDefaultIdentityWrite}, - {name: "DefaultIdentityWriteAny", prefix: "", check: checkDefaultIdentityWriteAny}, {name: "DefaultIntentionDefaultAllow", prefix: "foo", check: checkDefaultIntentionDefaultAllow}, {name: "DefaultIntentionRead", prefix: "foo", check: checkDefaultIntentionRead}, {name: "DefaultIntentionWrite", prefix: "foo", check: checkDefaultIntentionWrite}, @@ -64,8 +61,6 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "DefaultPreparedQueryRead", prefix: "foo", check: checkDefaultPreparedQueryRead}, {name: "DefaultPreparedQueryWrite", prefix: "foo", check: checkDefaultPreparedQueryWrite}, {name: "DefaultServiceRead", prefix: "foo", check: checkDefaultServiceRead}, - {name: "DefaultServiceReadAll", prefix: "foo", check: checkDefaultServiceReadAll}, - {name: "DefaultServiceReadPrefix", prefix: "foo", check: checkDefaultServiceReadPrefix}, {name: "DefaultServiceWrite", prefix: "foo", check: checkDefaultServiceWrite}, {name: "DefaultServiceWriteAny", prefix: "", check: checkDefaultServiceWriteAny}, {name: "DefaultSessionRead", prefix: "foo", check: checkDefaultSessionRead}, @@ -190,29 +185,6 @@ func TestPolicyAuthorizer(t *testing.T) { Policy: PolicyRead, }, }, - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "football", - Policy: PolicyDeny, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "foot", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "fo", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - }, Keys: []*KeyRule{ { Prefix: "foo", @@ -398,23 +370,21 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "ServiceReadDenied", prefix: "football", check: checkDenyServiceRead}, {name: "ServiceWriteDenied", prefix: "football", check: checkDenyServiceWrite}, {name: "ServiceWriteAnyAllowed", prefix: "", check: checkAllowServiceWriteAny}, - {name: "ServiceReadWithinPrefixDenied", prefix: "foot", check: checkDenyServiceReadPrefix}, - {name: "IdentityReadPrefixAllowed", prefix: "fo", check: checkAllowIdentityRead}, - {name: "IdentityWritePrefixDenied", prefix: "fo", check: checkDenyIdentityWrite}, - {name: "IdentityReadPrefixAllowed", prefix: "for", check: checkAllowIdentityRead}, - {name: "IdentityWritePrefixDenied", prefix: "for", check: checkDenyIdentityWrite}, - {name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, - {name: "IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, - {name: "IdentityReadPrefixAllowed", prefix: "foot", check: checkAllowIdentityRead}, - {name: "IdentityWritePrefixDenied", prefix: "foot", check: checkDenyIdentityWrite}, - {name: "IdentityReadPrefixAllowed", prefix: "foot2", check: checkAllowIdentityRead}, - {name: "IdentityWritePrefixDenied", prefix: "foot2", check: checkDenyIdentityWrite}, - {name: "IdentityReadPrefixAllowed", prefix: "food", check: checkAllowIdentityRead}, - {name: "IdentityWritePrefixDenied", prefix: "food", check: checkDenyIdentityWrite}, - {name: "IdentityReadDenied", prefix: "football", check: 
checkDenyIdentityRead}, - {name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite}, - {name: "IdentityWriteAnyAllowed", prefix: "", check: checkAllowIdentityWriteAny}, + {name: "NodeReadPrefixAllowed", prefix: "fo", check: checkAllowNodeRead}, + {name: "NodeWritePrefixDenied", prefix: "fo", check: checkDenyNodeWrite}, + {name: "NodeReadPrefixAllowed", prefix: "for", check: checkAllowNodeRead}, + {name: "NodeWritePrefixDenied", prefix: "for", check: checkDenyNodeWrite}, + {name: "NodeReadAllowed", prefix: "foo", check: checkAllowNodeRead}, + {name: "NodeWriteAllowed", prefix: "foo", check: checkAllowNodeWrite}, + {name: "NodeReadPrefixAllowed", prefix: "foot", check: checkAllowNodeRead}, + {name: "NodeWritePrefixDenied", prefix: "foot", check: checkDenyNodeWrite}, + {name: "NodeReadPrefixAllowed", prefix: "foot2", check: checkAllowNodeRead}, + {name: "NodeWritePrefixDenied", prefix: "foot2", check: checkDenyNodeWrite}, + {name: "NodeReadPrefixAllowed", prefix: "food", check: checkAllowNodeRead}, + {name: "NodeWritePrefixDenied", prefix: "food", check: checkDenyNodeWrite}, + {name: "NodeReadDenied", prefix: "football", check: checkDenyNodeRead}, + {name: "NodeWriteDenied", prefix: "football", check: checkDenyNodeWrite}, {name: "IntentionReadPrefixAllowed", prefix: "fo", check: checkAllowIntentionRead}, {name: "IntentionWritePrefixDenied", prefix: "fo", check: checkDenyIntentionWrite}, @@ -573,214 +543,6 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "AllDenied", prefix: "*", check: checkDenyIntentionWrite}, }, }, - "Service Read Prefix - read allowed with write policy and exact prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyWrite, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - read allowed with read policy and exact prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - read denied with deny policy and exact prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyDeny, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo", check: checkDenyServiceReadPrefix}, - }, - }, - "Service Read Prefix - read allowed with write policy and shorter prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyWrite, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo1", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - read allowed with read policy and shorter prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo1", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - read denied with deny policy and shorter prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo", - Policy: PolicyDeny, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo1", check: 
checkDenyServiceReadPrefix}, - }, - }, - "Service Read Prefix - default with write policy and longer prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo1", - Policy: PolicyWrite, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDefault", prefix: "foo", check: checkDefaultServiceReadPrefix}, - }, - }, - "Service Read Prefix - default with read policy and longer prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo1", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDefault", prefix: "foo", check: checkDefaultServiceReadPrefix}, - }, - }, - "Service Read Prefix - deny with deny policy and longer prefix": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "foo1", - Policy: PolicyDeny, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo", check: checkDenyServiceReadPrefix}, - }, - }, - "Service Read Prefix - allow with two shorter prefixes - more specific one allowing read and less specific denying": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "fo", - Policy: PolicyDeny, - }, - { - Name: "foo", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - deny with two shorter prefixes - more specific one denying and less specific allowing read": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "fo", - Policy: PolicyRead, - }, - { - Name: "foo", - Policy: PolicyDeny, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo", check: checkDenyServiceReadPrefix}, - }, - }, - "Service Read Prefix - deny with exact match denying": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "fo", - Policy: PolicyRead, - }, - }, - Services: []*ServiceRule{ - { - Name: "foo-123", - Policy: PolicyDeny, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo", check: checkDenyServiceReadPrefix}, - }, - }, - "Service Read Prefix - allow with exact match allowing read": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "fo", - Policy: PolicyRead, - }, - }, - Services: []*ServiceRule{ - { - Name: "foo-123", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixAllowed", prefix: "foo", check: checkAllowServiceReadPrefix}, - }, - }, - "Service Read Prefix - deny with exact match allowing read but prefix match denying": { - policy: &Policy{PolicyRules: PolicyRules{ - ServicePrefixes: []*ServiceRule{ - { - Name: "fo", - Policy: PolicyDeny, - }, - }, - Services: []*ServiceRule{ - { - Name: "foo-123", - Policy: PolicyRead, - }, - }, - }}, - checks: []aclCheck{ - {name: "ServiceReadPrefixDenied", prefix: "foo", check: checkDenyServiceReadPrefix}, - }, - }, } for name, tcase := range cases { diff --git a/acl/policy_ce.go b/acl/policy_ce.go index fe139ef7ab757..b33c3243364ba 100644 --- a/acl/policy_ce.go +++ b/acl/policy_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/policy_merger.go b/acl/policy_merger.go index 83166e167dd05..df065a9cb1b95 100644 --- a/acl/policy_merger.go +++ b/acl/policy_merger.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -9,8 +9,6 @@ type policyRulesMergeContext struct { agentPrefixRules map[string]*AgentRule eventRules map[string]*EventRule eventPrefixRules map[string]*EventRule - identityRules map[string]*IdentityRule - identityPrefixRules map[string]*IdentityRule keyringRule string keyRules map[string]*KeyRule keyPrefixRules map[string]*KeyRule @@ -35,8 +33,6 @@ func (p *policyRulesMergeContext) init() { p.agentPrefixRules = make(map[string]*AgentRule) p.eventRules = make(map[string]*EventRule) p.eventPrefixRules = make(map[string]*EventRule) - p.identityRules = make(map[string]*IdentityRule) - p.identityPrefixRules = make(map[string]*IdentityRule) p.keyringRule = "" p.keyRules = make(map[string]*KeyRule) p.keyPrefixRules = make(map[string]*KeyRule) @@ -102,42 +98,6 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) { } } - for _, id := range policy.Identities { - existing, found := p.identityRules[id.Name] - - if !found { - p.identityRules[id.Name] = id - continue - } - - if takesPrecedenceOver(id.Policy, existing.Policy) { - existing.Policy = id.Policy - existing.EnterpriseRule = id.EnterpriseRule - } - - if takesPrecedenceOver(id.Intentions, existing.Intentions) { - existing.Intentions = id.Intentions - } - } - - for _, id := range policy.IdentityPrefixes { - existing, found := p.identityPrefixRules[id.Name] - - if !found { - p.identityPrefixRules[id.Name] = id - continue - } - - if takesPrecedenceOver(id.Policy, existing.Policy) { - existing.Policy = id.Policy - existing.EnterpriseRule = id.EnterpriseRule - } - - if takesPrecedenceOver(id.Intentions, existing.Intentions) { - existing.Intentions = id.Intentions - } - } - if takesPrecedenceOver(policy.Keyring, p.keyringRule) { p.keyringRule = policy.Keyring } @@ -309,16 +269,6 @@ func (p *policyRulesMergeContext) fill(merged *PolicyRules) { merged.EventPrefixes = append(merged.EventPrefixes, policy) } - merged.Identities = []*IdentityRule{} - for _, policy := range p.identityRules { - merged.Identities = append(merged.Identities, policy) - } - - merged.IdentityPrefixes = []*IdentityRule{} - for _, policy := range p.identityPrefixRules { - merged.IdentityPrefixes = append(merged.IdentityPrefixes, policy) - } - merged.Keys = []*KeyRule{} for _, policy := range p.keyRules { merged.Keys = append(merged.Keys, policy) diff --git a/acl/policy_merger_ce.go b/acl/policy_merger_ce.go index 4738314a6c1dd..b221f25875399 100644 --- a/acl/policy_merger_ce.go +++ b/acl/policy_merger_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package acl diff --git a/acl/policy_test.go b/acl/policy_test.go index 599c8c977e1b9..ac23e3c0df3b0 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -42,12 +42,6 @@ func TestPolicySourceParse(t *testing.T) { event "bar" { policy = "deny" } - identity_prefix "" { - policy = "write" - } - identity "foo" { - policy = "read" - } key_prefix "" { policy = "read" } @@ -123,16 +117,6 @@ func TestPolicySourceParse(t *testing.T) { "policy": "deny" } }, - "identity_prefix": { - "": { - "policy": "write" - } - }, - "identity": { - "foo": { - "policy": "read" - } - }, "key_prefix": { "": { "policy": "read" @@ -233,18 +217,6 @@ func TestPolicySourceParse(t *testing.T) { Policy: PolicyDeny, }, }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "", - Policy: PolicyWrite, - }, - }, - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyRead, - }, - }, Keyring: PolicyDeny, KeyPrefixes: []*KeyRule{ { @@ -331,39 +303,6 @@ func TestPolicySourceParse(t *testing.T) { }, }}, }, - { - Name: "Identity No Intentions", - Rules: `identity "foo" { policy = "write" }`, - RulesJSON: `{ "identity": { "foo": { "policy": "write" }}}`, - Expected: &Policy{PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: "write", - }, - }, - }}, - }, - { - Name: "Identity Intentions", - Rules: `identity "foo" { policy = "write" intentions = "read" }`, - RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "read" }}}`, - Expected: &Policy{PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: "write", - Intentions: "read", - }, - }, - }}, - }, - { - Name: "Identity Intention: invalid value", - Rules: `identity "foo" { policy = "write" intentions = "foo" }`, - RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "foo" }}}`, - Err: "Invalid identity intentions policy", - }, { Name: "Service No Intentions", Rules: `service "foo" { policy = "write" }`, @@ -415,18 +354,6 @@ func TestPolicySourceParse(t *testing.T) { RulesJSON: `{ "agent_prefix": { "foo": { "policy": "nope" }}}`, Err: "Invalid agent_prefix policy", }, - { - Name: "Bad Policy - Identity", - Rules: `identity "foo" { policy = "nope" }`, - RulesJSON: `{ "identity": { "foo": { "policy": "nope" }}}`, - Err: "Invalid identity policy", - }, - { - Name: "Bad Policy - Identity Prefix", - Rules: `identity_prefix "foo" { policy = "nope" }`, - RulesJSON: `{ "identity_prefix": { "foo": { "policy": "nope" }}}`, - Err: "Invalid identity_prefix policy", - }, { Name: "Bad Policy - Key", Rules: `key "foo" { policy = "nope" }`, @@ -758,109 +685,6 @@ func TestMergePolicies(t *testing.T) { }, }}, }, - { - name: "Identities", - input: []*Policy{ - {PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "bar", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "baz", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "000", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "111", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "222", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - }, - }}, - {PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "baz", - Policy: PolicyDeny, - Intentions: PolicyDeny, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "000", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "222", - Policy: PolicyDeny, - 
Intentions: PolicyDeny, - }, - }, - }}, - }, - expected: &Policy{PolicyRules: PolicyRules{ - Identities: []*IdentityRule{ - { - Name: "foo", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "bar", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "baz", - Policy: PolicyDeny, - Intentions: PolicyDeny, - }, - }, - IdentityPrefixes: []*IdentityRule{ - { - Name: "000", - Policy: PolicyWrite, - Intentions: PolicyWrite, - }, - { - Name: "111", - Policy: PolicyRead, - Intentions: PolicyRead, - }, - { - Name: "222", - Policy: PolicyDeny, - Intentions: PolicyDeny, - }, - }, - }}, - }, { name: "Node", input: []*Policy{ diff --git a/acl/resolver/danger.go b/acl/resolver/danger.go index 29b4f35ac1d0f..a72efa9278449 100644 --- a/acl/resolver/danger.go +++ b/acl/resolver/danger.go @@ -1,19 +1,15 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resolver -import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" -) +import "github.com/hashicorp/consul/acl" // DANGER_NO_AUTH implements an ACL resolver short-circuit authorization in // cases where it is handled somewhere else or expressly not required. type DANGER_NO_AUTH struct{} // ResolveTokenAndDefaultMeta returns an authorizer with unfettered permissions. -func (DANGER_NO_AUTH) ResolveTokenAndDefaultMeta(_ string, entMeta *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (Result, error) { - entMeta.Merge(structs.DefaultEnterpriseMetaInDefaultPartition()) +func (DANGER_NO_AUTH) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (Result, error) { return Result{Authorizer: acl.ManageAll()}, nil } diff --git a/acl/resolver/result.go b/acl/resolver/result.go index 1e52b1c573168..190d15eca5b8a 100644 --- a/acl/resolver/result.go +++ b/acl/resolver/result.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resolver diff --git a/acl/static_authorizer.go b/acl/static_authorizer.go index 759b378669ad8..a6678925695c9 100644 --- a/acl/static_authorizer.go +++ b/acl/static_authorizer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -75,34 +75,6 @@ func (s *staticAuthorizer) EventWrite(string, *AuthorizerContext) EnforcementDec return Deny } -func (s *staticAuthorizer) IdentityRead(string, *AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - -func (s *staticAuthorizer) IdentityReadAll(*AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - -func (s *staticAuthorizer) IdentityWrite(string, *AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - -func (s *staticAuthorizer) IdentityWriteAny(*AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - func (s *staticAuthorizer) IntentionDefaultAllow(*AuthorizerContext) EnforcementDecision { if s.defaultAllow { return Allow @@ -257,13 +229,6 @@ func (s *staticAuthorizer) ServiceReadAll(*AuthorizerContext) EnforcementDecisio return Deny } -func (s *staticAuthorizer) ServiceReadPrefix(string, *AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - func (s *staticAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision { if s.defaultAllow { return Allow @@ -299,20 +264,6 @@ func (s *staticAuthorizer) Snapshot(_ *AuthorizerContext) EnforcementDecision { return Deny } -func (s *staticAuthorizer) TrafficPermissionsRead(string, *AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - -func (s *staticAuthorizer) TrafficPermissionsWrite(string, *AuthorizerContext) EnforcementDecision { - if s.defaultAllow { - return Allow - } - return Deny -} - func (s *staticAuthorizer) ToAllowAuthorizer() AllowAuthorizer { return AllowAuthorizer{Authorizer: s} } diff --git a/acl/static_authorizer_test.go b/acl/static_authorizer_test.go index cdaf91ef71020..e94ac44e500f0 100644 --- a/acl/static_authorizer_test.go +++ b/acl/static_authorizer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/acl/testing.go b/acl/testing.go index ef4d0343c6b9e..1c67458b174f8 100644 --- a/acl/testing.go +++ b/acl/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/acl/validation.go b/acl/validation.go index 652a76e87930c..96119dcc0fbaa 100644 --- a/acl/validation.go +++ b/acl/validation.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -68,10 +68,6 @@ func IsValidRoleName(name string) bool { return validRoleName.MatchString(name) } -func IsValidPolicyName(name string) bool { - return ValidatePolicyName(name) == nil -} - // IsValidRoleName returns true if the provided name can be used as an // ACLAuthMethod Name. func IsValidAuthMethodName(name string) bool { diff --git a/acl/validation_test.go b/acl/validation_test.go index 3bf14719b12ae..d5d01e0e9054e 100644 --- a/acl/validation_test.go +++ b/acl/validation_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/acl.go b/agent/acl.go index 0f64ee62c79e9..381f2c028e4fc 100644 --- a/agent/acl.go +++ b/agent/acl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/acl_ce.go b/agent/acl_ce.go index 4ce9bb00642ab..aa505da1ef49b 100644 --- a/agent/acl_ce.go +++ b/agent/acl_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/acl_endpoint.go b/agent/acl_endpoint.go index ac773c59b443c..3a52b0cf54413 100644 --- a/agent/acl_endpoint.go +++ b/agent/acl_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -1133,146 +1133,3 @@ func (s *HTTPHandlers) ACLAuthorize(resp http.ResponseWriter, req *http.Request) return responses, nil } - -func (s *HTTPHandlers) ACLTemplatedPoliciesList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - if s.checkACLDisabled() { - return nil, aclDisabled - } - - var token string - s.parseToken(req, &token) - - var entMeta acl.EnterpriseMeta - if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { - return nil, err - } - - s.defaultMetaPartitionToAgent(&entMeta) - var authzContext acl.AuthorizerContext - authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext) - if err != nil { - return nil, err - } - - // Only ACLRead privileges are required to list templated policies - if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil { - return nil, err - } - - templatedPolicies := make(map[string]api.ACLTemplatedPolicyResponse) - - for tp, tmpBase := range structs.GetACLTemplatedPolicyList() { - templatedPolicies[tp] = api.ACLTemplatedPolicyResponse{ - TemplateName: tmpBase.TemplateName, - Schema: tmpBase.Schema, - Template: tmpBase.Template, - Description: tmpBase.Description, - } - } - - return templatedPolicies, nil -} - -func (s *HTTPHandlers) ACLTemplatedPolicyRead(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - if s.checkACLDisabled() { - return nil, aclDisabled - } - - templateName := strings.TrimPrefix(req.URL.Path, "/v1/acl/templated-policy/name/") - if templateName == "" { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing templated policy Name"} - } - - var token string - s.parseToken(req, &token) - - var entMeta acl.EnterpriseMeta - if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { - return nil, err - } - - s.defaultMetaPartitionToAgent(&entMeta) - var authzContext acl.AuthorizerContext - authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext) - if err != nil { - return nil, err - } - - // Only ACLRead privileges are required to read templated policies - if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil { - return nil, err - } - - baseTemplate, ok := structs.GetACLTemplatedPolicyBase(templateName) - if !ok { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid templated policy Name: %s", templateName)} - } - - return api.ACLTemplatedPolicyResponse{ - TemplateName: baseTemplate.TemplateName, - Schema: baseTemplate.Schema, - Template: baseTemplate.Template, - Description: baseTemplate.Description, - }, nil -} - -func (s *HTTPHandlers) ACLTemplatedPolicyPreview(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - if s.checkACLDisabled() { - return nil, aclDisabled - } - - templateName := strings.TrimPrefix(req.URL.Path, 
"/v1/acl/templated-policy/preview/") - if templateName == "" { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing templated policy Name"} - } - - var token string - s.parseToken(req, &token) - - var entMeta acl.EnterpriseMeta - if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { - return nil, err - } - - s.defaultMetaPartitionToAgent(&entMeta) - var authzContext acl.AuthorizerContext - authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext) - if err != nil { - return nil, err - } - - // Only ACLRead privileges are required to read/preview templated policies - if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil { - return nil, err - } - - baseTemplate, ok := structs.GetACLTemplatedPolicyBase(templateName) - if !ok { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("templated policy %q does not exist", templateName)} - } - - var tpRequest structs.ACLTemplatedPolicyVariables - - if err := decodeBody(req.Body, &tpRequest); err != nil { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Failed to decode request body: %s", err.Error())} - } - - templatedPolicy := structs.ACLTemplatedPolicy{ - TemplateID: baseTemplate.TemplateID, - TemplateName: baseTemplate.TemplateName, - TemplateVariables: &tpRequest, - } - - err = templatedPolicy.ValidateTemplatedPolicy(baseTemplate.Schema) - if err != nil { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("validation error for templated policy: %q: %s", templatedPolicy.TemplateName, err.Error())} - } - - renderedPolicy, err := templatedPolicy.SyntheticPolicy(&entMeta) - - if err != nil { - return nil, HTTPError{StatusCode: http.StatusInternalServerError, Reason: fmt.Sprintf("Failed to generate synthetic policy: %q: %s", templatedPolicy.TemplateName, err.Error())} - } - - return renderedPolicy, nil -} diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 0656b0882d5b2..cd800ef42b2bd 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -21,7 +21,6 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/testrpc" @@ -1361,123 +1360,6 @@ func TestACL_HTTP(t *testing.T) { require.Len(t, token.ServiceIdentities, 1) require.Equal(t, "sn1", token.ServiceIdentities[0].ServiceName) }) - - t.Run("List by ServiceName based on templated policies", func(t *testing.T) { - tokenInput := &structs.ACLToken{ - Description: "token for templated policies service", - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: "builtin/service", - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "service1", - }, - }, - }, - } - - req, _ := http.NewRequest("PUT", "/v1/acl/token", jsonBody(tokenInput)) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - _, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - - req, _ = http.NewRequest("GET", "/v1/acl/tokens?servicename=service1", nil) - req.Header.Add("X-Consul-Token", "root") - resp = httptest.NewRecorder() - raw, err := a.srv.ACLTokenList(resp, req) - require.NoError(t, err) - tokens, ok := raw.(structs.ACLTokenListStubs) - require.True(t, ok) - require.Len(t, tokens, 1) - token := tokens[0] - require.Equal(t, "token for templated policies service", token.Description) - require.Len(t, token.TemplatedPolicies, 1) - require.Equal(t, "service1", token.TemplatedPolicies[0].TemplateVariables.Name) - }) - }) - - t.Run("ACLTemplatedPolicy", func(t *testing.T) { - t.Run("List", func(t *testing.T) { - req, _ := http.NewRequest("GET", "/v1/acl/templated-policies", nil) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - a.srv.h.ServeHTTP(resp, req) - - require.Equal(t, http.StatusOK, resp.Code) - - var list map[string]api.ACLTemplatedPolicyResponse - require.NoError(t, json.NewDecoder(resp.Body).Decode(&list)) - require.Len(t, list, 7) - - require.Equal(t, api.ACLTemplatedPolicyResponse{ - TemplateName: api.ACLTemplatedPolicyServiceName, - Schema: structs.ACLTemplatedPolicyServiceSchema, - Template: structs.ACLTemplatedPolicyService, - Description: structs.ACLTemplatedPolicyServiceDescription, - }, list[api.ACLTemplatedPolicyServiceName]) - }) - t.Run("Read", func(t *testing.T) { - t.Run("With non existing templated policy", func(t *testing.T) { - req, _ := http.NewRequest("GET", "/v1/acl/templated-policy/name/fake", nil) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusBadRequest, resp.Code) - }) - - t.Run("With existing templated policy", func(t *testing.T) { - req, _ := http.NewRequest("GET", "/v1/acl/templated-policy/name/"+api.ACLTemplatedPolicyDNSName, nil) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusOK, resp.Code) - - var templatedPolicy api.ACLTemplatedPolicyResponse - require.NoError(t, json.NewDecoder(resp.Body).Decode(&templatedPolicy)) - require.Equal(t, structs.ACLTemplatedPolicyNoRequiredVariablesSchema, templatedPolicy.Schema) - require.Equal(t, structs.ACLTemplatedPolicyDNSDescription, templatedPolicy.Description) - require.Equal(t, 
api.ACLTemplatedPolicyDNSName, templatedPolicy.TemplateName) - require.Equal(t, structs.ACLTemplatedPolicyDNS, templatedPolicy.Template) - }) - }) - t.Run("preview", func(t *testing.T) { - t.Run("When missing required variables", func(t *testing.T) { - previewInput := &structs.ACLTemplatedPolicyVariables{} - req, _ := http.NewRequest( - "POST", - fmt.Sprintf("/v1/acl/templated-policy/preview/%s", api.ACLTemplatedPolicyServiceName), - jsonBody(previewInput), - ) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusBadRequest, resp.Code) - }) - - t.Run("Correct input", func(t *testing.T) { - previewInput := &structs.ACLTemplatedPolicyVariables{Name: "web"} - req, _ := http.NewRequest( - "POST", - fmt.Sprintf("/v1/acl/templated-policy/preview/%s", api.ACLTemplatedPolicyServiceName), - jsonBody(previewInput), - ) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusOK, resp.Code) - - var syntheticPolicy *structs.ACLPolicy - require.NoError(t, json.NewDecoder(resp.Body).Decode(&syntheticPolicy)) - - require.NotEmpty(t, syntheticPolicy.ID) - require.NotEmpty(t, syntheticPolicy.Hash) - require.Equal(t, "synthetic policy generated from templated policy: builtin/service", syntheticPolicy.Description) - require.Contains(t, syntheticPolicy.Name, "synthetic-policy-") - }) - }) }) } @@ -2225,7 +2107,7 @@ func TestACL_Authorize(t *testing.T) { policyReq := structs.ACLPolicySetRequest{ Policy: structs.ACLPolicy{ Name: "test", - Rules: `acl = "read" operator = "write" identity_prefix "" { policy = "read"} service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, + Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, }, Datacenter: "dc1", WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, @@ -2311,16 +2193,6 @@ func TestACL_Authorize(t *testing.T) { Segment: "foo", Access: "write", }, - { - Resource: "identity", - Segment: "foo", - Access: "read", - }, - { - Resource: "identity", - Segment: "foo", - Access: "write", - }, { Resource: "intention", Segment: "foo", @@ -2471,16 +2343,6 @@ func TestACL_Authorize(t *testing.T) { Segment: "foo", Access: "write", }, - { - Resource: "identity", - Segment: "foo", - Access: "read", - }, - { - Resource: "identity", - Segment: "foo", - Access: "write", - }, { Resource: "intention", Segment: "foo", @@ -2587,8 +2449,6 @@ func TestACL_Authorize(t *testing.T) { false, // agent:write false, // event:read false, // event:write - true, // identity:read - false, // identity:write true, // intentions:read false, // intention:write false, // key:read diff --git a/agent/acl_test.go b/agent/acl_test.go index 0958db8db6fb0..40662231ac367 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -22,7 +22,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/types" @@ -164,9 +163,6 @@ func (a *TestACLAgent) Stats() map[string]map[string]string { func (a *TestACLAgent) ReloadConfig(_ consul.ReloadableConfig) error { return fmt.Errorf("Unimplemented") } -func (a *TestACLAgent) ResourceServiceClient() pbresource.ResourceServiceClient { - return nil -} func TestACL_Version8EnabledByDefault(t *testing.T) { t.Parallel() diff --git a/agent/ae/ae.go b/agent/ae/ae.go index f8b9a331d100c..8c4d8c9972966 100644 --- a/agent/ae/ae.go +++ b/agent/ae/ae.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package ae provides tools to synchronize state between local and remote consul servers. package ae @@ -81,9 +81,8 @@ type StateSyncer struct { SyncChanges *Trigger // paused stores whether sync runs are temporarily disabled. - pauseLock sync.Mutex - paused int - hardDisabled bool + pauseLock sync.Mutex + paused int // serverUpInterval is the max time after which a full sync is // performed when a server has been added to the cluster. @@ -152,20 +151,9 @@ const ( retryFullSyncState fsmState = "retryFullSync" ) -// HardDisableSync is like PauseSync but is one-way. It causes other -// Pause/Resume/Start operations to be completely ignored. -func (s *StateSyncer) HardDisableSync() { - s.pauseLock.Lock() - s.hardDisabled = true - s.pauseLock.Unlock() -} - // Run is the long running method to perform state synchronization // between local and remote servers. func (s *StateSyncer) Run() { - if s.Disabled() { - return - } if s.ClusterSize == nil { panic("ClusterSize not set") } @@ -341,14 +329,7 @@ func (s *StateSyncer) Pause() { func (s *StateSyncer) Paused() bool { s.pauseLock.Lock() defer s.pauseLock.Unlock() - return s.paused != 0 || s.hardDisabled -} - -// Disabled returns whether sync runs are permanently disabled. -func (s *StateSyncer) Disabled() bool { - s.pauseLock.Lock() - defer s.pauseLock.Unlock() - return s.hardDisabled + return s.paused != 0 } // Resume re-enables sync runs. It returns true if it was the last pause/resume @@ -359,7 +340,7 @@ func (s *StateSyncer) Resume() bool { if s.paused < 0 { panic("unbalanced pause/resume") } - trigger := s.paused == 0 && !s.hardDisabled + trigger := s.paused == 0 s.pauseLock.Unlock() if trigger { s.SyncChanges.Trigger() diff --git a/agent/ae/ae_test.go b/agent/ae/ae_test.go index 9e9593f4f92d2..873cd4128db34 100644 --- a/agent/ae/ae_test.go +++ b/agent/ae/ae_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ae diff --git a/agent/ae/trigger.go b/agent/ae/trigger.go index 29bdd988907eb..a320bda526d1f 100644 --- a/agent/ae/trigger.go +++ b/agent/ae/trigger.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ae diff --git a/agent/agent.go b/agent/agent.go index 98e1252cefaff..7a64b74bd1b18 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
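// Reference sketch: with hardDisabled removed above, StateSyncer pausing is
// purely a counting scheme. Pause increments a counter, Resume decrements it,
// and only the Resume that returns the counter to zero re-triggers a sync.
// A minimal standalone model of that behavior (illustration only, separate
// from the ae package):
package main

import (
	"fmt"
	"sync"
)

type pauser struct {
	mu     sync.Mutex
	paused int
}

func (p *pauser) Pause() { p.mu.Lock(); p.paused++; p.mu.Unlock() }

// Resume reports whether this call un-paused (and so should trigger a sync),
// mirroring StateSyncer.Resume above.
func (p *pauser) Resume() bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.paused--
	if p.paused < 0 {
		panic("unbalanced pause/resume")
	}
	return p.paused == 0
}

func main() {
	var p pauser
	p.Pause()
	p.Pause()
	fmt.Println(p.Resume()) // false: still paused once
	fmt.Println(p.Resume()) // true: last resume, a sync would be triggered
}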
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -48,7 +48,6 @@ import ( "github.com/hashicorp/consul/agent/consul" rpcRate "github.com/hashicorp/consul/agent/consul/rate" "github.com/hashicorp/consul/agent/consul/servercert" - "github.com/hashicorp/consul/agent/discovery" "github.com/hashicorp/consul/agent/dns" external "github.com/hashicorp/consul/agent/grpc-external" grpcDNS "github.com/hashicorp/consul/agent/grpc-external/services/dns" @@ -69,16 +68,12 @@ import ( "github.com/hashicorp/consul/agent/xds" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api/watch" - libdns "github.com/hashicorp/consul/internal/dnsutil" - proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/file" "github.com/hashicorp/consul/lib/mutex" "github.com/hashicorp/consul/lib/routine" "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/pbconfigentry" "github.com/hashicorp/consul/proto/private/pboperator" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/consul/tlsutil" @@ -206,9 +201,6 @@ type delegate interface { RPC(ctx context.Context, method string, args interface{}, reply interface{}) error - // ResourceServiceClient is a client for the gRPC Resource Service. - ResourceServiceClient() pbresource.ResourceServiceClient - SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error Shutdown() error Stats() map[string]map[string]string @@ -221,14 +213,6 @@ type notifier interface { Notify(string) error } -// dnsServer abstracts the V1 and V2 implementations of the DNS server. -type dnsServer interface { - GetAddr() string - ListenAndServe(string, string, func()) error - ReloadConfig(*config.RuntimeConfig) error - Shutdown() -} - // Agent is the long running process that is run on every machine. // It exposes an RPC interface that is used by the CLI to control the // agent. The agent runs the query interfaces like HTTP, DNS, and RPC. @@ -351,11 +335,7 @@ type Agent struct { endpointsLock sync.RWMutex // dnsServer provides the DNS API - dnsServers []dnsServer - - // catalogDataFetcher is used as an interface to the catalog for service discovery - // (aka DNS). Only applicable to the V2 DNS server (agent/dns). - catalogDataFetcher discovery.CatalogDataFetcher + dnsServers []*DNSServer // apiServers listening for connections. If any of these server goroutines // fail, the agent will be shutdown. @@ -416,11 +396,10 @@ type Agent struct { // they can update their internal state. configReloaders []ConfigReloader - // TODO: pass directly to HTTPHandlers and dnsServer once those are passed + // TODO: pass directly to HTTPHandlers and DNSServer once those are passed // into Agent, which will allow us to remove this field. 
- rpcClientHealth *health.Client - rpcClientConfigEntry *configentry.Client - grpcClientConfigEntry pbconfigentry.ConfigEntryServiceClient + rpcClientHealth *health.Client + rpcClientConfigEntry *configentry.Client rpcClientPeering pbpeering.PeeringServiceClient @@ -523,7 +502,6 @@ func New(bd BaseDeps) (*Agent, error) { a.rpcClientPeering = pbpeering.NewPeeringServiceClient(conn) a.rpcClientOperator = pboperator.NewOperatorServiceClient(conn) - a.grpcClientConfigEntry = pbconfigentry.NewConfigEntryServiceClient(conn) a.serviceManager = NewServiceManager(&a) a.rpcClientConfigEntry = &configentry.Client{ @@ -644,9 +622,6 @@ func (a *Agent) Start(ctx context.Context) error { // create the state synchronization manager which performs // regular and on-demand state synchronizations (anti-entropy). a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger) - if a.baseDeps.UseV2Resources() { - a.sync.HardDisableSync() - } err = validateFIPSConfig(a.config) if err != nil { @@ -678,12 +653,7 @@ func (a *Agent) Start(ctx context.Context) error { return fmt.Errorf("failed to start Consul enterprise component: %v", err) } - // proxyTracker will be used in the creation of the XDS server and also - // in the registration of the v2 xds controller - var proxyTracker *proxytracker.ProxyTracker - // Setup either the client or the server. - var consulServer *consul.Server if c.ServerMode { serverLogger := a.baseDeps.Logger.NamedIntercept(logging.ConsulServer) @@ -718,21 +688,14 @@ func (a *Agent) Start(ctx context.Context) error { Time: a.config.GRPCKeepaliveInterval, Timeout: a.config.GRPCKeepaliveTimeout, }, - nil, ) - if a.baseDeps.UseV2Resources() { - proxyTracker = proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{ - Logger: a.logger.Named("proxy-tracker"), - SessionLimiter: a.baseDeps.XDSStreamLimiter, - }) - } - consulServer, err = consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger, proxyTracker) + server, err := consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger) if err != nil { return fmt.Errorf("Failed to start Consul server: %v", err) } - incomingRPCLimiter.Register(consulServer) - a.delegate = consulServer + incomingRPCLimiter.Register(server) + a.delegate = server if a.config.PeeringEnabled && a.config.ConnectEnabled { d := servercert.Deps{ @@ -742,7 +705,7 @@ func (a *Agent) Start(ctx context.Context) error { ACLsEnabled: a.config.ACLsEnabled, }, LeafCertManager: a.leafCertManager, - GetStore: func() servercert.Store { return consulServer.FSM().State() }, + GetStore: func() servercert.Store { return server.FSM().State() }, TLSConfigurator: a.tlsConfigurator, } a.certManager = servercert.NewCertManager(d) @@ -751,15 +714,6 @@ func (a *Agent) Start(ctx context.Context) error { } } } else { - if a.baseDeps.UseV2Resources() { - return fmt.Errorf("can't start agent: client agents are not supported with v2 resources") - } - - // the conn is used to connect to the consul server agent - conn, err := a.baseDeps.GRPCConnPool.ClientConn(a.baseDeps.RuntimeConfig.Datacenter) - if err != nil { - return err - } a.externalGRPCServer = external.NewServer( a.logger.Named("grpc.external"), metrics.Default(), @@ -769,7 +723,6 @@ func (a *Agent) Start(ctx context.Context) error { Time: a.config.GRPCKeepaliveInterval, Timeout: a.config.GRPCKeepaliveTimeout, }, - conn, ) client, err := consul.NewClient(consulCfg, a.baseDeps.Deps) @@ -808,25 +761,21 @@ func (a *Agent) Start(ctx context.Context) 
error { return err } - intentionDefaultAllow, err := a.config.ACLResolverSettings.IsDefaultAllow() - if err != nil { - return fmt.Errorf("unexpected ACL default policy value of %q", a.config.ACLResolverSettings.ACLDefaultPolicy) - } - - // If DefaultIntentionPolicy is defined, it should override - // the values inherited from ACLDefaultPolicy. - switch a.config.DefaultIntentionPolicy { + var intentionDefaultAllow bool + switch a.config.ACLResolverSettings.ACLDefaultPolicy { case "allow": intentionDefaultAllow = true case "deny": intentionDefaultAllow = false + default: + return fmt.Errorf("unexpected ACL default policy value of %q", a.config.ACLResolverSettings.ACLDefaultPolicy) } go a.baseDeps.ViewStore.Run(&lib.StopChannelContext{StopCh: a.shutdownCh}) // Start the proxy config manager. a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{ - DataSources: a.proxyDataSources(consulServer), + DataSources: a.proxyDataSources(), Logger: a.logger.Named(logging.ProxyConfig), Source: &structs.QuerySource{ Datacenter: a.config.Datacenter, @@ -854,7 +803,6 @@ func (a *Agent) Start(ctx context.Context) error { Logger: a.proxyConfig.Logger.Named("agent-state"), Tokens: a.baseDeps.Tokens, NodeName: a.config.NodeName, - NodeLocality: a.config.StructLocality(), ResyncFrequency: a.config.LocalProxyConfigResyncInterval, }, ) @@ -877,15 +825,8 @@ func (a *Agent) Start(ctx context.Context) error { } // start DNS servers - if a.baseDeps.UseV2DNS() { - a.logger.Warn("DNS v2 is under construction") - if err := a.listenAndServeV2DNS(); err != nil { - return err - } - } else { - if err := a.listenAndServeV1DNS(); err != nil { - return err - } + if err := a.listenAndServeDNS(); err != nil { + return err } // Configure the http connection limiter. @@ -906,7 +847,7 @@ func (a *Agent) Start(ctx context.Context) error { } // Start grpc and grpc_tls servers. - if err := a.listenAndServeGRPC(proxyTracker, consulServer); err != nil { + if err := a.listenAndServeGRPC(); err != nil { return err } @@ -944,6 +885,17 @@ func (a *Agent) Start(ctx context.Context) error { }() } + if a.scadaProvider != nil { + a.scadaProvider.UpdateMeta(map[string]string{ + "consul_server_id": string(a.config.NodeID), + }) + + if err = a.scadaProvider.Start(); err != nil { + a.baseDeps.Logger.Error("scada provider failed to start, some HashiCorp Cloud Platform functionality has been disabled", + "error", err, "resource_id", a.config.Cloud.ResourceID) + } + } + return nil } @@ -960,60 +912,39 @@ func (a *Agent) Failed() <-chan struct{} { return a.apiServers.failed } -// configureXDSServer configures an XDS server with the proper implementation of -// the PRoxyWatcher interface and registers the XDS server with Consul's -// external facing GRPC server. -func (a *Agent) configureXDSServer(proxyWatcher xds.ProxyWatcher, server *consul.Server) { +func (a *Agent) listenAndServeGRPC() error { + if len(a.config.GRPCAddrs) < 1 && len(a.config.GRPCTLSAddrs) < 1 { + return nil + } // TODO(agentless): rather than asserting the concrete type of delegate, we // should add a method to the Delegate interface to build a ConfigSource. 
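// Reference sketch of the behavior restored earlier in this hunk: because this
// backport removes the DefaultIntentionPolicy setting, intention enforcement
// falls back to the ACL default policy alone, and any other value is fatal at
// startup. The helper name below is hypothetical; the switch mirrors the
// restored lines above.
func intentionDefaultAllowFromACL(aclDefaultPolicy string) (bool, error) {
	switch aclDefaultPolicy {
	case "allow":
		return true, nil
	case "deny":
		return false, nil
	default:
		return false, fmt.Errorf("unexpected ACL default policy value of %q", aclDefaultPolicy)
	}
}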
- if server != nil { - switch proxyWatcher.(type) { - case *proxytracker.ProxyTracker: - go func() { - <-a.shutdownCh - proxyWatcher.(*proxytracker.ProxyTracker).Shutdown() - }() - default: - catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{ - NodeName: a.config.NodeName, - LocalState: a.State, - LocalConfigSource: proxyWatcher, - Manager: a.proxyConfig, - GetStore: func() catalogproxycfg.Store { return server.FSM().State() }, - Logger: a.proxyConfig.Logger.Named("server-catalog"), - SessionLimiter: a.baseDeps.XDSStreamLimiter, - }) - go func() { - <-a.shutdownCh - catalogCfg.Shutdown() - }() - proxyWatcher = catalogCfg - } + var cfg xds.ProxyConfigSource = localproxycfg.NewConfigSource(a.proxyConfig) + if server, ok := a.delegate.(*consul.Server); ok { + catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{ + NodeName: a.config.NodeName, + LocalState: a.State, + LocalConfigSource: cfg, + Manager: a.proxyConfig, + GetStore: func() catalogproxycfg.Store { return server.FSM().State() }, + Logger: a.proxyConfig.Logger.Named("server-catalog"), + SessionLimiter: a.baseDeps.XDSStreamLimiter, + }) + go func() { + <-a.shutdownCh + catalogCfg.Shutdown() + }() + cfg = catalogCfg } a.xdsServer = xds.NewServer( a.config.NodeName, a.logger.Named(logging.Envoy), - proxyWatcher, + cfg, func(id string) (acl.Authorizer, error) { return a.delegate.ResolveTokenAndDefaultMeta(id, nil, nil) }, a, ) a.xdsServer.Register(a.externalGRPCServer) -} - -func (a *Agent) listenAndServeGRPC(proxyTracker *proxytracker.ProxyTracker, server *consul.Server) error { - if len(a.config.GRPCAddrs) < 1 && len(a.config.GRPCTLSAddrs) < 1 { - return nil - } - var proxyWatcher xds.ProxyWatcher - if a.baseDeps.UseV2Resources() { - proxyWatcher = proxyTracker - } else { - proxyWatcher = localproxycfg.NewConfigSource(a.proxyConfig) - } - - a.configureXDSServer(proxyWatcher, server) // Attempt to spawn listeners var listeners []net.Listener @@ -1064,7 +995,7 @@ func (a *Agent) listenAndServeGRPC(proxyTracker *proxytracker.ProxyTracker, serv return nil } -func (a *Agent) listenAndServeV1DNS() error { +func (a *Agent) listenAndServeDNS() error { notif := make(chan net.Addr, len(a.config.DNSAddrs)) errCh := make(chan error, len(a.config.DNSAddrs)) for _, addr := range a.config.DNSAddrs { @@ -1116,92 +1047,6 @@ func (a *Agent) listenAndServeV1DNS() error { return merr.ErrorOrNil() } -func (a *Agent) listenAndServeV2DNS() error { - - // Check the catalog version and decide which implementation of the data fetcher to implement - if a.baseDeps.UseV2Resources() { - a.catalogDataFetcher = discovery.NewV2DataFetcher(a.config, a.delegate.ResourceServiceClient(), a.logger.Named("catalog-data-fetcher")) - } else { - a.catalogDataFetcher = discovery.NewV1DataFetcher(a.config, - a.AgentEnterpriseMeta(), - a.cache.Get, - a.RPC, - a.rpcClientHealth.ServiceNodes, - a.rpcClientConfigEntry.GetSamenessGroup, - a.TranslateServicePort, - a.logger.Named("catalog-data-fetcher")) - } - - // Generate a Query Processor with the appropriate data fetcher - processor := discovery.NewQueryProcessor(a.catalogDataFetcher) - - notif := make(chan net.Addr, len(a.config.DNSAddrs)) - errCh := make(chan error, len(a.config.DNSAddrs)) - - // create server - cfg := dns.Config{ - AgentConfig: a.config, - EntMeta: *a.AgentEnterpriseMeta(), - Logger: a.logger, - Processor: processor, - TokenFunc: a.getTokenFunc(), - TranslateAddressFunc: a.TranslateAddress, - TranslateServiceAddressFunc: a.TranslateServiceAddress, - } - - for _, addr := range 
a.config.DNSAddrs { - s, err := dns.NewServer(cfg) - if err != nil { - return err - } - a.dnsServers = append(a.dnsServers, s) - - // start server - a.wgServers.Add(1) - go func(addr net.Addr) { - defer a.wgServers.Done() - err := s.ListenAndServe(addr.Network(), addr.String(), func() { notif <- addr }) - if err != nil && !strings.Contains(err.Error(), "accept") { - errCh <- err - } - }(addr) - } - - s, err := dns.NewServer(cfg) - if err != nil { - return fmt.Errorf("failed to create grpc dns server: %w", err) - } - - // Create a v2 compatible grpc dns server - grpcDNS.NewServerV2(grpcDNS.ConfigV2{ - Logger: a.logger.Named("grpc-api.dns"), - DNSRouter: s.Router, - TokenFunc: a.getTokenFunc(), - }).Register(a.externalGRPCServer) - - a.dnsServers = append(a.dnsServers, s) - - // wait for servers to be up - timeout := time.After(time.Second) - var merr *multierror.Error - for range a.config.DNSAddrs { - select { - case addr := <-notif: - a.logger.Info("Started DNS server", - "address", addr.String(), - "network", addr.Network(), - ) - - case err := <-errCh: - merr = multierror.Append(merr, err) - case <-timeout: - merr = multierror.Append(merr, fmt.Errorf("agent: timeout starting DNS servers")) - return merr.ErrorOrNil() - } - } - return merr.ErrorOrNil() -} - // startListeners will return a net.Listener for every address unless an // error is encountered, in which case it will close all previously opened // listeners and return the error. @@ -1320,7 +1165,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) { } httpAddrs := a.config.HTTPAddrs - if a.scadaProvider != nil { + if a.config.IsCloudEnabled() { httpAddrs = append(httpAddrs, scada.CAPCoreAPI) } @@ -1721,7 +1566,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co cfg.RequestLimitsWriteRate = runtimeCfg.RequestLimitsWriteRate cfg.Locality = runtimeCfg.StructLocality() - cfg.Cloud = runtimeCfg.Cloud + cfg.Cloud.ManagementToken = runtimeCfg.Cloud.ManagementToken cfg.Reporting.License.Enabled = runtimeCfg.Reporting.License.Enabled @@ -1937,7 +1782,14 @@ func (a *Agent) ShutdownEndpoints() { ctx := context.TODO() for _, srv := range a.dnsServers { - srv.Shutdown() + if srv.Server != nil { + a.logger.Info("Stopping server", + "protocol", "DNS", + "address", srv.Server.Addr, + "network", srv.Server.Net, + ) + srv.Shutdown() + } } a.dnsServers = nil @@ -2772,13 +2624,13 @@ func (a *Agent) validateService(service *structs.NodeService, chkTypes []*struct } // Warn if the service name is incompatible with DNS - if libdns.InvalidNameRe.MatchString(service.Service) { + if dns.InvalidNameRe.MatchString(service.Service) { a.logger.Warn("Service name will not be discoverable "+ "via DNS due to invalid characters. Valid characters include "+ "all alpha-numerics and dashes.", "service", service.Service, ) - } else if len(service.Service) > libdns.MaxLabelLength { + } else if len(service.Service) > dns.MaxLabelLength { a.logger.Warn("Service name will not be discoverable "+ "via DNS due to it being too long. Valid lengths are between "+ "1 and 63 bytes.", @@ -2788,13 +2640,13 @@ func (a *Agent) validateService(service *structs.NodeService, chkTypes []*struct // Warn if any tags are incompatible with DNS for _, tag := range service.Tags { - if libdns.InvalidNameRe.MatchString(tag) { + if dns.InvalidNameRe.MatchString(tag) { a.logger.Debug("Service tag will not be discoverable "+ "via DNS due to invalid characters. 
Valid characters include "+ "all alpha-numerics and dashes.", "tag", tag, ) - } else if len(tag) > libdns.MaxLabelLength { + } else if len(tag) > dns.MaxLabelLength { a.logger.Debug("Service tag will not be discoverable "+ "via DNS due to it being too long. Valid lengths are between "+ "1 and 63 bytes.", @@ -3804,13 +3656,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI } ns := service.NodeService() - - // We currently do not persist locality inherited from the node service - // (it is inherited at runtime). See agent/proxycfg-sources/local/sync.go. - // To support locality-aware service discovery in the future, persisting - // this data may be necessary. This does not impact agent-less deployments - // because locality is explicitly set on service registration there. - chkTypes, err := service.CheckTypes() if err != nil { return fmt.Errorf("Failed to validate checks for service %q: %v", service.Name, err) @@ -4413,10 +4258,6 @@ func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error { return fmt.Errorf("Failed reloading dns config : %v", err) } } - // This field is only populated for the V2 DNS server - if a.catalogDataFetcher != nil { - a.catalogDataFetcher.LoadConfig(newCfg) - } err := a.reloadEnterprise(newCfg) if err != nil { @@ -4709,7 +4550,7 @@ func (a *Agent) listenerPortLocked(svcID structs.ServiceID, checkID structs.Chec return port, nil } -func (a *Agent) proxyDataSources(server *consul.Server) proxycfg.DataSources { +func (a *Agent) proxyDataSources() proxycfg.DataSources { sources := proxycfg.DataSources{ CARoots: proxycfgglue.CacheCARoots(a.cache), CompiledDiscoveryChain: proxycfgglue.CacheCompiledDiscoveryChain(a.cache), @@ -4736,7 +4577,7 @@ func (a *Agent) proxyDataSources(server *consul.Server) proxycfg.DataSources { ExportedPeeredServices: proxycfgglue.CacheExportedPeeredServices(a.cache), } - if server != nil { + if server, ok := a.delegate.(*consul.Server); ok { deps := proxycfgglue.ServerDataSourceDeps{ Datacenter: a.config.Datacenter, EventPublisher: a.baseDeps.EventPublisher, @@ -4758,8 +4599,8 @@ func (a *Agent) proxyDataSources(server *consul.Server) proxycfg.DataSources { sources.Health = proxycfgglue.ServerHealthBlocking(deps, proxycfgglue.ClientHealth(a.rpcClientHealth)) sources.HTTPChecks = proxycfgglue.ServerHTTPChecks(deps, a.config.NodeName, proxycfgglue.CacheHTTPChecks(a.cache), a.State) sources.Intentions = proxycfgglue.ServerIntentions(deps) - sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps, a.config.DefaultIntentionPolicy) - sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps, a.config.DefaultIntentionPolicy) + sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps) + sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps) sources.InternalServiceDump = proxycfgglue.ServerInternalServiceDump(deps, proxycfgglue.CacheInternalServiceDump(a.cache)) sources.PeeringList = proxycfgglue.ServerPeeringList(deps) sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps) @@ -4893,13 +4734,3 @@ func defaultIfEmpty(val, defaultVal string) string { } return defaultVal } - -func (a *Agent) getTokenFunc() func() string { - return func() string { - if a.tokens.DNSToken() != "" { - return a.tokens.DNSToken() - } else { - return a.tokens.UserToken() - } - } -} diff --git a/agent/agent_ce.go b/agent/agent_ce.go index a4a6cbf809d9e..e8cfea681b3cb 100644 --- a/agent/agent_ce.go +++ 
b/agent/agent_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/agent_ce_test.go b/agent/agent_ce_test.go index f9eed9018ba0a..ceb90beb0634c 100644 --- a/agent/agent_ce_test.go +++ b/agent/agent_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 5360eafe285b0..ade06172d84e0 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -11,19 +11,17 @@ import ( "strings" "time" - "github.com/mitchellh/hashstructure" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/serf/coordinate" "github.com/hashicorp/serf/serf" + "github.com/mitchellh/hashstructure" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/hashicorp/consul/acl" cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/debug" "github.com/hashicorp/consul/agent/leafcert" @@ -1169,13 +1167,6 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http. // Get the node service. ns := args.NodeService() - - // We currently do not persist locality inherited from the node service - // (it is inherited at runtime). See agent/proxycfg-sources/local/sync.go. - // To support locality-aware service discovery in the future, persisting - // this data may be necessary. This does not impact agent-less deployments - // because locality is explicitly set on service registration there. 
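// Note on the agent_ce.go / agent_ce_test.go hunks above and the other *_ce.go
// files in this patch: the backport re-adds the legacy build-constraint line
// alongside the //go:build form, presumably because this release branch still
// supports toolchains and tools that only understand the pre-Go-1.17 syntax.
// Both lines must express the same constraint, for example:
//
//	//go:build !consulent
//	// +build !consulent
//
// Current gofmt keeps the pair in sync; dropping the legacy line hides the
// constraint from tools that predate //go:build.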
- if ns.Weights != nil { if err := structs.ValidateWeights(ns.Weights); err != nil { return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid Weights: %v", err)} @@ -1542,9 +1533,6 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) ( case "config_file_service_registration": s.agent.tokens.UpdateConfigFileRegistrationToken(args.Token, token_store.TokenSourceAPI) - case "dns_token", "dns": - s.agent.tokens.UpdateDNSToken(args.Token, token_store.TokenSourceAPI) - default: return HTTPError{StatusCode: http.StatusNotFound, Reason: fmt.Sprintf("Token %q is unknown", target)} } @@ -1667,112 +1655,14 @@ func (s *HTTPHandlers) AgentConnectAuthorize(resp http.ResponseWriter, req *http return nil, nil } - // We need to have a target to check intentions - if authReq.Target == "" { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Target service must be specified"} - } - - // Parse the certificate URI from the client ID - uri, err := connect.ParseCertURIFromString(authReq.ClientCertURI) + authz, reason, cacheMeta, err := s.agent.ConnectAuthorize(token, &authReq) if err != nil { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "ClientCertURI not a valid Connect identifier"} - } - - uriService, ok := uri.(*connect.SpiffeIDService) - if !ok { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "ClientCertURI not a valid Service identifier"} - } - - // We need to verify service:write permissions for the given token. - // We do this manually here since the RPC request below only verifies - // service:read. - var authzContext acl.AuthorizerContext - authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &authReq.EnterpriseMeta, &authzContext) - if err != nil { - return nil, fmt.Errorf("Could not resolve token to authorizer: %w", err) - } - - if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(authReq.Target, &authzContext); err != nil { return nil, err } - - if !uriService.MatchesPartition(authReq.TargetPartition()) { - return nil, HTTPError{ - StatusCode: http.StatusBadRequest, - Reason: fmt.Sprintf("Mismatched partitions: %q != %q", - uriService.PartitionOrDefault(), - acl.PartitionOrDefault(authReq.TargetPartition())), - } - } - - // Get the intentions for this target service. - args := &structs.IntentionQueryRequest{ - Datacenter: s.agent.config.Datacenter, - Match: &structs.IntentionQueryMatch{ - Type: structs.IntentionMatchDestination, - Entries: []structs.IntentionMatchEntry{ - { - Namespace: authReq.TargetNamespace(), - Partition: authReq.TargetPartition(), - Name: authReq.Target, - }, - }, - }, - QueryOptions: structs.QueryOptions{Token: token}, - } - - raw, meta, err := s.agent.cache.Get(req.Context(), cachetype.IntentionMatchName, args) - if err != nil { - return nil, fmt.Errorf("failed getting intention match: %w", err) - } - - reply, ok := raw.(*structs.IndexedIntentionMatches) - if !ok { - return nil, fmt.Errorf("internal error: response type not correct") - } - if len(reply.Matches) != 1 { - return nil, fmt.Errorf("Internal error loading matches") - } - - // Figure out which source matches this request. - var ixnMatch *structs.Intention - for _, ixn := range reply.Matches[0] { - // We match on the intention source because the uriService is the source of the connection to authorize. 
- if _, ok := connect.AuthorizeIntentionTarget( - uriService.Service, uriService.Namespace, uriService.Partition, "", ixn, structs.IntentionMatchSource); ok { - ixnMatch = ixn - break - } - } - - var ( - authorized bool - reason string - ) - - if ixnMatch != nil { - if len(ixnMatch.Permissions) == 0 { - // This is an L4 intention. - reason = fmt.Sprintf("Matched L4 intention: %s", ixnMatch.String()) - authorized = ixnMatch.Action == structs.IntentionActionAllow - } else { - reason = fmt.Sprintf("Matched L7 intention: %s", ixnMatch.String()) - // This is an L7 intention, so DENY. - authorized = false - } - } else if s.agent.config.DefaultIntentionPolicy != "" { - reason = "Default intention policy" - authorized = s.agent.config.DefaultIntentionPolicy == structs.IntentionDefaultPolicyAllow - } else { - reason = "Default behavior configured by ACLs" - //nolint:staticcheck - authorized = authz.IntentionDefaultAllow(nil) == acl.Allow - } - - setCacheMeta(resp, &meta) + setCacheMeta(resp, cacheMeta) return &connectAuthorizeResp{ - Authorized: authorized, + Authorized: authz, Reason: reason, }, nil } diff --git a/agent/agent_endpoint_ce.go b/agent/agent_endpoint_ce.go index 657d5122fe247..48b9c439cac47 100644 --- a/agent/agent_endpoint_ce.go +++ b/agent/agent_endpoint_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/agent_endpoint_ce_test.go b/agent/agent_endpoint_ce_test.go index 1b1dc866837fd..763a5a006049c 100644 --- a/agent/agent_endpoint_ce_test.go +++ b/agent/agent_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 9fd76fae4fa01..5814cf9a8a9cd 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
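// Reference sketch of the decision logic removed above, condensed from the
// deleted lines (the handler now delegates all of this to
// s.agent.ConnectAuthorize): an L4 intention match authorizes only when its
// action is allow, an L7 intention (one with Permissions) always denies at
// this endpoint, and with no match the ACL default behavior decides. The
// DefaultIntentionPolicy branch is omitted because this backport removes that
// setting; identifiers are from the removed code and the helper name is
// hypothetical.
func decideConnectAuthorize(ixnMatch *structs.Intention, aclDefaultAllow bool) (authorized bool, reason string) {
	switch {
	case ixnMatch == nil:
		return aclDefaultAllow, "Default behavior configured by ACLs"
	case len(ixnMatch.Permissions) == 0:
		// L4 intention: the action decides.
		return ixnMatch.Action == structs.IntentionActionAllow,
			fmt.Sprintf("Matched L4 intention: %s", ixnMatch.String())
	default:
		// L7 intention: never authorized via this endpoint.
		return false, fmt.Sprintf("Matched L7 intention: %s", ixnMatch.String())
	}
}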
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -21,6 +21,10 @@ import ( "time" "github.com/armon/go-metrics" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/version" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/serf" @@ -40,14 +44,12 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" tokenStore "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/envoyextensions/xdscommon" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" - "github.com/hashicorp/consul/version" ) func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { @@ -79,46 +81,6 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { return svcToken.SecretID } -func TestAgentEndpointsFailInV2(t *testing.T) { - t.Parallel() - - a := NewTestAgent(t, `experiments = ["resource-apis"]`) - - checkRequest := func(method, url string) { - t.Run(method+" "+url, func(t *testing.T) { - assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{}`) - }) - } - - t.Run("agent-self-with-params", func(t *testing.T) { - req, err := http.NewRequest("GET", "/v1/agent/self?dc=dc1", nil) - require.NoError(t, err) - - resp := httptest.NewRecorder() - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusOK, resp.Code) - - _, err = io.ReadAll(resp.Body) - require.NoError(t, err) - }) - - checkRequest("PUT", "/v1/agent/maintenance") - checkRequest("GET", "/v1/agent/services") - checkRequest("GET", "/v1/agent/service/web") - checkRequest("GET", "/v1/agent/checks") - checkRequest("GET", "/v1/agent/health/service/id/web") - checkRequest("GET", "/v1/agent/health/service/name/web") - checkRequest("PUT", "/v1/agent/check/register") - checkRequest("PUT", "/v1/agent/check/deregister/web") - checkRequest("PUT", "/v1/agent/check/pass/web") - checkRequest("PUT", "/v1/agent/check/warn/web") - checkRequest("PUT", "/v1/agent/check/fail/web") - checkRequest("PUT", "/v1/agent/check/update/web") - checkRequest("PUT", "/v1/agent/service/register") - checkRequest("PUT", "/v1/agent/service/deregister/web") - checkRequest("PUT", "/v1/agent/service/maintenance/web") -} - func TestAgent_Services(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -1638,37 +1600,14 @@ func TestAgent_Metrics_ACLDeny(t *testing.T) { }) } -func newDefaultBaseDeps(t *testing.T) BaseDeps { - dataDir := testutil.TempDir(t, "acl-agent") - logBuffer := testutil.NewLogBuffer(t) - logger := hclog.NewInterceptLogger(nil) - loader := func(source config.Source) (config.LoadResult, error) { - dataDir := fmt.Sprintf(`data_dir = "%s"`, dataDir) - opts := config.LoadOpts{ - HCL: []string{TestConfigHCL(NodeID()), "", dataDir}, - DefaultConfig: source, - } - result, err := config.Load(opts) - if result.RuntimeConfig != nil { - result.RuntimeConfig.Telemetry.Disable = true - } - return result, err - } - bd, err := NewBaseDeps(loader, logBuffer, logger) - require.NoError(t, err) - return bd -} - func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) { - bd := newDefaultBaseDeps(t) + bd := BaseDeps{} bd.Tokens = new(tokenStore.Store) sink := metrics.NewInmemSink(30*time.Millisecond, time.Second) bd.MetricsConfig = &lib.MetricsConfig{ Handler: sink, } - mockDelegate := 
delegateMock{} - mockDelegate.On("LicenseCheck").Return() - d := fakeResolveTokenDelegate{delegate: &mockDelegate, authorizer: acl.DenyAll()} + d := fakeResolveTokenDelegate{authorizer: acl.DenyAll()} agent := &Agent{ baseDeps: bd, delegate: d, @@ -1691,15 +1630,13 @@ func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) { } func TestHTTPHandlers_AgentMetricsStream(t *testing.T) { - bd := newDefaultBaseDeps(t) + bd := BaseDeps{} bd.Tokens = new(tokenStore.Store) sink := metrics.NewInmemSink(20*time.Millisecond, time.Second) bd.MetricsConfig = &lib.MetricsConfig{ Handler: sink, } - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - d := fakeResolveTokenDelegate{delegate: &mockDelegate, authorizer: acl.ManageAll()} + d := fakeResolveTokenDelegate{authorizer: acl.ManageAll()} agent := &Agent{ baseDeps: bd, delegate: d, @@ -1877,7 +1814,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { require.NoError(t, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-001")) checkStr := func(r *retry.R, evaluator func(string) error) { - r.Helper() + t.Helper() contentsStr := "" // Wait for watch to be populated for i := 1; i < 7; i++ { @@ -1890,14 +1827,14 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { break } time.Sleep(time.Duration(i) * time.Second) - testutil.Logger(r).Info("Watch not yet populated, retrying") + testutil.Logger(t).Info("Watch not yet populated, retrying") } if err := evaluator(contentsStr); err != nil { r.Errorf("ERROR: Test failing: %s", err) } } ensureNothingCritical := func(r *retry.R, mustContain string) { - r.Helper() + t.Helper() eval := func(contentsStr string) error { if strings.Contains(contentsStr, "critical") { return fmt.Errorf("MUST NOT contain critical:= %s", contentsStr) @@ -1915,7 +1852,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { } retry.RunWith(retriesWithDelay(), t, func(r *retry.R) { - testutil.Logger(r).Info("Consul is now ready") + testutil.Logger(t).Info("Consul is now ready") // it should contain the output checkStr(r, func(contentStr string) error { if contentStr == "[]" { @@ -4340,7 +4277,7 @@ func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) } // testCreateToken creates a Policy for the provided rules and a Token linked to that Policy. 
-func testCreateToken(t testutil.TestingTB, a *TestAgent, rules string) string { +func testCreateToken(t *testing.T, a *TestAgent, rules string) string { policyName, err := uuid.GenerateUUID() // we just need a unique name for the test and UUIDs are definitely unique require.NoError(t, err) @@ -4369,7 +4306,7 @@ func testCreateToken(t testutil.TestingTB, a *TestAgent, rules string) string { return aclResp.SecretID } -func testCreatePolicy(t testutil.TestingTB, a *TestAgent, name, rules string) string { +func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string { args := map[string]interface{}{ "Name": name, "Rules": rules, @@ -8015,104 +7952,76 @@ func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { assert.Equal(t, http.StatusForbidden, resp.Code) } -func TestAgentConnectAuthorize_DefaultIntentionPolicy(t *testing.T) { +// Test when no intentions match w/ a default deny policy +func TestAgentConnectAuthorize_defaultDeny(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } t.Parallel() - agentConfig := `primary_datacenter = "dc1" -default_intention_policy = "%s" -` - aclBlock := `acl { - enabled = true - default_policy = "%s" - tokens { - initial_management = "root" - agent = "root" - agent_recovery = "towel" - } -} -` - - type testcase struct { - aclsEnabled bool - defaultACL string - defaultIxn string - expectAuthz bool - expectReason string - } - tcs := map[string]testcase{ - "no ACLs, default intention allow": { - aclsEnabled: false, - defaultIxn: "allow", - expectAuthz: true, - expectReason: "Default intention policy", - }, - "no ACLs, default intention deny": { - aclsEnabled: false, - defaultIxn: "deny", - expectAuthz: false, - expectReason: "Default intention policy", - }, - "ACL deny, no intention policy": { - aclsEnabled: true, - defaultACL: "deny", - expectAuthz: false, - expectReason: "Default behavior configured by ACLs", - }, - "ACL allow, no intention policy": { - aclsEnabled: true, - defaultACL: "allow", - expectAuthz: true, - expectReason: "Default behavior configured by ACLs", - }, - "ACL deny, default intentions allow": { - aclsEnabled: true, - defaultACL: "deny", - defaultIxn: "allow", - expectAuthz: true, - expectReason: "Default intention policy", - }, - "ACL allow, default intentions deny": { - aclsEnabled: true, - defaultACL: "allow", - defaultIxn: "deny", - expectAuthz: false, - expectReason: "Default intention policy", - }, - } - for name, tc := range tcs { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() + a := NewTestAgent(t, TestACLConfig()) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - conf := fmt.Sprintf(agentConfig, tc.defaultIxn) - if tc.aclsEnabled { - conf += fmt.Sprintf(aclBlock, tc.defaultACL) - } - a := NewTestAgent(t, conf) + args := &structs.ConnectAuthorizeRequest{ + Target: "foo", + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + req.Header.Add("X-Consul-Token", "root") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) - testrpc.WaitForLeader(t, a.RPC, "dc1") + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) + assert.False(t, obj.Authorized) + assert.Contains(t, obj.Reason, "Default behavior") +} + +// Test when no intentions match w/ a default allow policy +func TestAgentConnectAuthorize_defaultAllow(t *testing.T) { + if testing.Short() { + t.Skip("too slow 
for testing.Short") + } - args := &structs.ConnectAuthorizeRequest{ - Target: "foo", - ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + t.Parallel() + + dc1 := "dc1" + a := NewTestAgent(t, ` + primary_datacenter = "`+dc1+`" + + acl { + enabled = true + default_policy = "allow" + + tokens { + initial_management = "root" + agent = "root" + agent_recovery = "towel" } - req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) - req.Header.Add("X-Consul-Token", "root") - resp := httptest.NewRecorder() - a.srv.h.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + } + `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, dc1) - dec := json.NewDecoder(resp.Body) - obj := &connectAuthorizeResp{} - require.NoError(t, dec.Decode(obj)) - assert.Equal(t, tc.expectAuthz, obj.Authorized) - assert.Contains(t, obj.Reason, tc.expectReason) - }) + args := &structs.ConnectAuthorizeRequest{ + Target: "foo", + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + req.Header.Add("X-Consul-Token", "root") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) + assert.True(t, obj.Authorized) + assert.Contains(t, obj.Reason, "Default behavior") } func TestAgent_Host(t *testing.T) { diff --git a/agent/agent_test.go b/agent/agent_test.go index 2cf0c2f4ccd49..e952d9dd87a19 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -32,7 +32,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/tcpproxy" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcp-scada-provider/capability" + "github.com/hashicorp/serf/coordinate" + "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" @@ -40,11 +45,6 @@ import ( "google.golang.org/protobuf/encoding/protojson" "gopkg.in/square/go-jose.v2/jwt" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/hcp-scada-provider/capability" - "github.com/hashicorp/serf/coordinate" - "github.com/hashicorp/serf/serf" - "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/checks" @@ -58,7 +58,6 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/private/pbautoconf" @@ -90,7 +89,7 @@ func requireServiceMissing(t *testing.T, a *TestAgent, id string) { require.Nil(t, getService(a, id), "have service %q (expected missing)", id) } -func requireCheckExists(t testutil.TestingTB, a *TestAgent, id types.CheckID) *structs.HealthCheck { +func requireCheckExists(t *testing.T, a *TestAgent, id types.CheckID) *structs.HealthCheck { t.Helper() chk := getCheck(a, id) require.NotNil(t, chk, "missing check %q", id) @@ -324,7 +323,6 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) { Tokens: new(token.Store), 
TLSConfigurator: tlsConf, GRPCConnPool: &fakeGRPCConnPool{}, - Registry: resource.NewRegistry(), }, RuntimeConfig: &config.RuntimeConfig{ HTTPAddrs: []net.Addr{ @@ -348,9 +346,6 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) { a, err := New(bd) require.NoError(t, err) - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - a.delegate = &mockDelegate a.startLicenseManager(testutil.TestContext(t)) @@ -385,8 +380,6 @@ func TestAgent_HTTPMaxHeaderBytes(t *testing.T) { resp, err := client.Do(req.WithContext(ctx)) require.NoError(t, err) require.Equal(t, tt.expectedHTTPResponse, resp.StatusCode, "expected a '%d' http response, got '%d'", tt.expectedHTTPResponse, resp.StatusCode) - resp.Body.Close() - s.Shutdown(ctx) } }) } @@ -853,7 +846,7 @@ func TestAgent_CheckAliasRPC(t *testing.T) { assert.NoError(t, err) retry.Run(t, func(r *retry.R) { - r.Helper() + t.Helper() var args structs.NodeSpecificRequest args.Datacenter = "dc1" args.Node = "node1" @@ -1888,7 +1881,7 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { // We do this so that the agent logs and the informational messages from // the test itself are interwoven properly. - logf := func(a *TestAgent, format string, args ...interface{}) { + logf := func(t *testing.T, a *TestAgent, format string, args ...interface{}) { a.logger.Info("testharness: " + fmt.Sprintf(format, args...)) } @@ -1947,12 +1940,12 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { retryUntilCheckState := func(t *testing.T, a *TestAgent, checkID string, expectedStatus string) { t.Helper() retry.Run(t, func(r *retry.R) { - chk := requireCheckExists(r, a, types.CheckID(checkID)) + chk := requireCheckExists(t, a, types.CheckID(checkID)) if chk.Status != expectedStatus { - logf(a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status) + logf(t, a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status) r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status) } - logf(a, "check %q has reached desired status %q", checkID, expectedStatus) + logf(t, a, "check %q has reached desired status %q", checkID, expectedStatus) }) } @@ -1963,7 +1956,7 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { retryUntilCheckState(t, a, "service:ping", api.HealthPassing) retryUntilCheckState(t, a, "service:ping-sidecar-proxy", api.HealthPassing) - logf(a, "==== POWERING DOWN ORIGINAL ====") + logf(t, a, "==== POWERING DOWN ORIGINAL ====") require.NoError(t, a.Shutdown()) @@ -1985,7 +1978,7 @@ node_name = "` + a.Config.NodeName + `" // reregister during standup; we use an adjustable timing to try and force a race sleepDur := time.Duration(idx+1) * 500 * time.Millisecond time.Sleep(sleepDur) - logf(a2, "re-registering checks and services after a delay of %v", sleepDur) + logf(t, a2, "re-registering checks and services after a delay of %v", sleepDur) for i := 0; i < 20; i++ { // RACE RACE RACE! 
registerServicesAndChecks(t, a2) time.Sleep(50 * time.Millisecond) @@ -1995,7 +1988,7 @@ node_name = "` + a.Config.NodeName + `" retryUntilCheckState(t, a2, "service:ping", api.HealthPassing) - logf(a2, "giving the alias check a chance to notice...") + logf(t, a2, "giving the alias check a chance to notice...") time.Sleep(5 * time.Second) retryUntilCheckState(t, a2, "service:ping-sidecar-proxy", api.HealthPassing) @@ -5559,7 +5552,6 @@ func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) { Tokens: new(token.Store), TLSConfigurator: tlsConf, GRPCConnPool: &fakeGRPCConnPool{}, - Registry: resource.NewRegistry(), }, RuntimeConfig: &config.RuntimeConfig{ HTTPAddrs: []net.Addr{ @@ -5582,9 +5574,6 @@ func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) { require.NoError(t, err) agent, err := New(bd) - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - agent.delegate = &mockDelegate require.NoError(t, err) agent.startLicenseManager(testutil.TestContext(t)) @@ -6159,7 +6148,6 @@ func TestAgent_startListeners(t *testing.T) { Logger: hclog.NewInterceptLogger(nil), Tokens: new(token.Store), GRPCConnPool: &fakeGRPCConnPool{}, - Registry: resource.NewRegistry(), }, RuntimeConfig: &config.RuntimeConfig{ HTTPAddrs: []net.Addr{}, @@ -6178,9 +6166,6 @@ func TestAgent_startListeners(t *testing.T) { require.NoError(t, err) agent, err := New(bd) - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - agent.delegate = &mockDelegate require.NoError(t, err) // use up an address @@ -6303,7 +6288,6 @@ func TestAgent_startListeners_scada(t *testing.T) { HCP: hcp.Deps{ Provider: pvd, }, - Registry: resource.NewRegistry(), }, RuntimeConfig: &config.RuntimeConfig{}, Cache: cache.New(cache.Options{}), @@ -6321,9 +6305,6 @@ func TestAgent_startListeners_scada(t *testing.T) { require.NoError(t, err) agent, err := New(bd) - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - agent.delegate = &mockDelegate require.NoError(t, err) _, err = agent.startListeners([]net.Addr{c}) @@ -6338,12 +6319,21 @@ func TestAgent_scadaProvider(t *testing.T) { require.NoError(t, err) defer require.NoError(t, l.Close()) + pvd.EXPECT().UpdateMeta(mock.Anything).Once() + pvd.EXPECT().Start().Return(nil).Once() pvd.EXPECT().Listen(scada.CAPCoreAPI.Capability()).Return(l, nil).Once() pvd.EXPECT().Stop().Return(nil).Once() + pvd.EXPECT().SessionStatus().Return("test") a := TestAgent{ OverrideDeps: func(deps *BaseDeps) { deps.HCP.Provider = pvd }, + Overrides: ` +cloud { + resource_id = "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/project/0b9de9a3-8403-4ca6-aba8-fca752f42100/consul.cluster/0b9de9a3-8403-4ca6-aba8-fca752f42100" + client_id = "test" + client_secret = "test" +}`, } defer a.Shutdown() require.NoError(t, a.Start(t)) @@ -6358,7 +6348,6 @@ func TestAgent_checkServerLastSeen(t *testing.T) { Logger: hclog.NewInterceptLogger(nil), Tokens: new(token.Store), GRPCConnPool: &fakeGRPCConnPool{}, - Registry: resource.NewRegistry(), }, RuntimeConfig: &config.RuntimeConfig{}, Cache: cache.New(cache.Options{}), @@ -6370,9 +6359,6 @@ func TestAgent_checkServerLastSeen(t *testing.T) { Config: leafcert.Config{}, }) agent, err := New(bd) - mockDelegate := delegateMock{} - mockDelegate.On("LicenseCheck").Return() - agent.delegate = &mockDelegate require.NoError(t, err) // Test that an ErrNotExist OS error is treated as ok. 
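// Reference sketch of the retry pattern touched throughout the test hunks
// above: retry.Run re-invokes the closure until it stops failing via
// r.Fatalf / r.Errorf or the retryer gives up. After this backport the
// closures call t.Helper() and testutil.Logger(t) on the enclosing *testing.T
// rather than on the *retry.R. Condensed usage, reusing only helpers that
// appear in the tests above:
retry.Run(t, func(r *retry.R) {
	t.Helper()
	chk := requireCheckExists(t, a, types.CheckID("service:ping"))
	if chk.Status != api.HealthPassing {
		r.Fatalf("check=%q expected status %q but got %q", "service:ping", api.HealthPassing, chk.Status)
	}
})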
diff --git a/agent/apiserver.go b/agent/apiserver.go index 1f386e3f6b171..a45e16a630b1a 100644 --- a/agent/apiserver.go +++ b/agent/apiserver.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/apiserver_test.go b/agent/apiserver_test.go index 848487a78154c..69188c4248176 100644 --- a/agent/apiserver_test.go +++ b/agent/apiserver_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index a1a5848f623f0..b73951df70d18 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/auto_config_ce.go b/agent/auto-config/auto_config_ce.go index 9ac615847e69f..78c9ee66d97ba 100644 --- a/agent/auto-config/auto_config_ce.go +++ b/agent/auto-config/auto_config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package autoconf diff --git a/agent/auto-config/auto_config_ce_test.go b/agent/auto-config/auto_config_ce_test.go index a38b0f155e391..b075ca7686b6e 100644 --- a/agent/auto-config/auto_config_ce_test.go +++ b/agent/auto-config/auto_config_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package autoconf diff --git a/agent/auto-config/auto_config_test.go b/agent/auto-config/auto_config_test.go index 7c5c629be2f73..a5ab97e0f45d7 100644 --- a/agent/auto-config/auto_config_test.go +++ b/agent/auto-config/auto_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/auto_encrypt.go b/agent/auto-config/auto_encrypt.go index 1b77c089f6f60..59af662ee033f 100644 --- a/agent/auto-config/auto_encrypt.go +++ b/agent/auto-config/auto_encrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/auto_encrypt_test.go b/agent/auto-config/auto_encrypt_test.go index d0768080248cd..3efb10de53fe5 100644 --- a/agent/auto-config/auto_encrypt_test.go +++ b/agent/auto-config/auto_encrypt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/config.go b/agent/auto-config/config.go index 69eee08bc061b..d0f1670ab73a7 100644 --- a/agent/auto-config/config.go +++ b/agent/auto-config/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/config_ce.go b/agent/auto-config/config_ce.go index 5d699f32a498d..4162bda4c4896 100644 --- a/agent/auto-config/config_ce.go +++ b/agent/auto-config/config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package autoconf diff --git a/agent/auto-config/config_translate.go b/agent/auto-config/config_translate.go index b60b3388eb2a8..31aeb7cbdb22f 100644 --- a/agent/auto-config/config_translate.go +++ b/agent/auto-config/config_translate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index 8e2cef8c46ea9..9b37c9870e318 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/mock_ce_test.go b/agent/auto-config/mock_ce_test.go index ea416a3de87cf..872aa5e5438f5 100644 --- a/agent/auto-config/mock_ce_test.go +++ b/agent/auto-config/mock_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package autoconf diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go index 0ef5084af2640..263befae112cb 100644 --- a/agent/auto-config/mock_test.go +++ b/agent/auto-config/mock_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/persist.go b/agent/auto-config/persist.go index 66cda1c41438c..0abaa235451d9 100644 --- a/agent/auto-config/persist.go +++ b/agent/auto-config/persist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/run.go b/agent/auto-config/run.go index ed3389c1880cb..74a78fde9f0d8 100644 --- a/agent/auto-config/run.go +++ b/agent/auto-config/run.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/server_addr.go b/agent/auto-config/server_addr.go index 6bca15d42fb8d..c70a6431fb33e 100644 --- a/agent/auto-config/server_addr.go +++ b/agent/auto-config/server_addr.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go index dd2d6f9e25e17..696eb905e84db 100644 --- a/agent/auto-config/tls.go +++ b/agent/auto-config/tls.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/auto-config/tls_test.go b/agent/auto-config/tls_test.go index 667c7dfa96e5c..b09ee295e60be 100644 --- a/agent/auto-config/tls_test.go +++ b/agent/auto-config/tls_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autoconf diff --git a/agent/cache-types/catalog_datacenters.go b/agent/cache-types/catalog_datacenters.go index 2a4e64c9e5c1c..12da6e9878e3d 100644 --- a/agent/cache-types/catalog_datacenters.go +++ b/agent/cache-types/catalog_datacenters.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_datacenters_test.go b/agent/cache-types/catalog_datacenters_test.go index f04bfb4c7b421..bef374d131cda 100644 --- a/agent/cache-types/catalog_datacenters_test.go +++ b/agent/cache-types/catalog_datacenters_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_list_services.go b/agent/cache-types/catalog_list_services.go index 0a14ed3ef120a..a605c7431388c 100644 --- a/agent/cache-types/catalog_list_services.go +++ b/agent/cache-types/catalog_list_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_list_services_test.go b/agent/cache-types/catalog_list_services_test.go index 623cda2cee3ce..b5da270f962d3 100644 --- a/agent/cache-types/catalog_list_services_test.go +++ b/agent/cache-types/catalog_list_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_service_list.go b/agent/cache-types/catalog_service_list.go index 37ac4ba0f8131..521ed1d3b1ada 100644 --- a/agent/cache-types/catalog_service_list.go +++ b/agent/cache-types/catalog_service_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_service_list_test.go b/agent/cache-types/catalog_service_list_test.go index eb686193cc3ca..995f7e8b6c8c0 100644 --- a/agent/cache-types/catalog_service_list_test.go +++ b/agent/cache-types/catalog_service_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_services.go b/agent/cache-types/catalog_services.go index 8e04997b9f62d..21b472ba3124f 100644 --- a/agent/cache-types/catalog_services.go +++ b/agent/cache-types/catalog_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/catalog_services_test.go b/agent/cache-types/catalog_services_test.go index c084de67ccaad..8723b9015d719 100644 --- a/agent/cache-types/catalog_services_test.go +++ b/agent/cache-types/catalog_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/config_entry.go b/agent/cache-types/config_entry.go index 98443363c1b87..9748c176d1033 100644 --- a/agent/cache-types/config_entry.go +++ b/agent/cache-types/config_entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/config_entry_test.go b/agent/cache-types/config_entry_test.go index d892b069e4c7b..11b109d6634a5 100644 --- a/agent/cache-types/config_entry_test.go +++ b/agent/cache-types/config_entry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/connect_ca_root.go b/agent/cache-types/connect_ca_root.go index 9ba1dab0b7dcd..0d6c8b700ca72 100644 --- a/agent/cache-types/connect_ca_root.go +++ b/agent/cache-types/connect_ca_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype @@ -8,12 +8,11 @@ import ( "fmt" "github.com/hashicorp/consul/agent/cache" - "github.com/hashicorp/consul/agent/cacheshim" "github.com/hashicorp/consul/agent/structs" ) // Recommended name for registration. -const ConnectCARootName = cacheshim.ConnectCARootName +const ConnectCARootName = "connect-ca-root" // ConnectCARoot supports fetching the Connect CA roots. This is a // straightforward cache type since it only has to block on the given diff --git a/agent/cache-types/connect_ca_root_test.go b/agent/cache-types/connect_ca_root_test.go index 74aa53c31a4c7..c1e906a8b810f 100644 --- a/agent/cache-types/connect_ca_root_test.go +++ b/agent/cache-types/connect_ca_root_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/discovery_chain.go b/agent/cache-types/discovery_chain.go index e27b621061e1d..8f0f177914887 100644 --- a/agent/cache-types/discovery_chain.go +++ b/agent/cache-types/discovery_chain.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/discovery_chain_test.go b/agent/cache-types/discovery_chain_test.go index a9c9783e882bb..b2b279faf7ddd 100644 --- a/agent/cache-types/discovery_chain_test.go +++ b/agent/cache-types/discovery_chain_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/exported_peered_services.go b/agent/cache-types/exported_peered_services.go index 69bd2d92ba71f..3e8f336281446 100644 --- a/agent/cache-types/exported_peered_services.go +++ b/agent/cache-types/exported_peered_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/exported_peered_services_test.go b/agent/cache-types/exported_peered_services_test.go index a2d618bb60c52..4848c2fce9dbe 100644 --- a/agent/cache-types/exported_peered_services_test.go +++ b/agent/cache-types/exported_peered_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/federation_state_list_gateways.go b/agent/cache-types/federation_state_list_gateways.go index 501a8bcead289..50658777b8d83 100644 --- a/agent/cache-types/federation_state_list_gateways.go +++ b/agent/cache-types/federation_state_list_gateways.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/federation_state_list_gateways_test.go b/agent/cache-types/federation_state_list_gateways_test.go index 04bd661e80fc6..7aaad80ed3b2e 100644 --- a/agent/cache-types/federation_state_list_gateways_test.go +++ b/agent/cache-types/federation_state_list_gateways_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/gateway_services.go b/agent/cache-types/gateway_services.go index 9c13800beeee0..030cec59ef88d 100644 --- a/agent/cache-types/gateway_services.go +++ b/agent/cache-types/gateway_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/gateway_services_test.go b/agent/cache-types/gateway_services_test.go index 49be4edf47808..babc30ead3c14 100644 --- a/agent/cache-types/gateway_services_test.go +++ b/agent/cache-types/gateway_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/health_services.go b/agent/cache-types/health_services.go index ae8369364743e..dc1a5e6648ad5 100644 --- a/agent/cache-types/health_services.go +++ b/agent/cache-types/health_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/health_services_test.go b/agent/cache-types/health_services_test.go index 6e83ec9a40181..e3680eb2d5adb 100644 --- a/agent/cache-types/health_services_test.go +++ b/agent/cache-types/health_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_match.go b/agent/cache-types/intention_match.go index fd69eab65c75a..16671328fd2da 100644 --- a/agent/cache-types/intention_match.go +++ b/agent/cache-types/intention_match.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_match_test.go b/agent/cache-types/intention_match_test.go index 26788b679befb..68a467a29d511 100644 --- a/agent/cache-types/intention_match_test.go +++ b/agent/cache-types/intention_match_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_upstreams.go b/agent/cache-types/intention_upstreams.go index a0e1ea0c0fd35..b918a553526ca 100644 --- a/agent/cache-types/intention_upstreams.go +++ b/agent/cache-types/intention_upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_upstreams_destination.go b/agent/cache-types/intention_upstreams_destination.go index 1b5200a163c2c..8adba2d7e7426 100644 --- a/agent/cache-types/intention_upstreams_destination.go +++ b/agent/cache-types/intention_upstreams_destination.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_upstreams_destination_test.go b/agent/cache-types/intention_upstreams_destination_test.go index 32852891846f0..d4f8602c7d7e9 100644 --- a/agent/cache-types/intention_upstreams_destination_test.go +++ b/agent/cache-types/intention_upstreams_destination_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/intention_upstreams_test.go b/agent/cache-types/intention_upstreams_test.go index 3259969f03a8f..6f695576d0639 100644 --- a/agent/cache-types/intention_upstreams_test.go +++ b/agent/cache-types/intention_upstreams_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/node_services.go b/agent/cache-types/node_services.go index 44dd5624f5658..2b51de9f62f89 100644 --- a/agent/cache-types/node_services.go +++ b/agent/cache-types/node_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/node_services_test.go b/agent/cache-types/node_services_test.go index 6f16f93d5d61f..a1412bbe935be 100644 --- a/agent/cache-types/node_services_test.go +++ b/agent/cache-types/node_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/options.go b/agent/cache-types/options.go index cd46060f8bce4..cbfa2ff178ef8 100644 --- a/agent/cache-types/options.go +++ b/agent/cache-types/options.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/peered_upstreams.go b/agent/cache-types/peered_upstreams.go index 964b350eb1505..49997ecdf9675 100644 --- a/agent/cache-types/peered_upstreams.go +++ b/agent/cache-types/peered_upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/peered_upstreams_test.go b/agent/cache-types/peered_upstreams_test.go index 07be6e4188082..1e9dc29fdf4a5 100644 --- a/agent/cache-types/peered_upstreams_test.go +++ b/agent/cache-types/peered_upstreams_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/peerings.go b/agent/cache-types/peerings.go index 53138e5512d8e..e72b43d563129 100644 --- a/agent/cache-types/peerings.go +++ b/agent/cache-types/peerings.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/peerings_test.go b/agent/cache-types/peerings_test.go index 088a077c4f504..75fc21371eb7d 100644 --- a/agent/cache-types/peerings_test.go +++ b/agent/cache-types/peerings_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/prepared_query.go b/agent/cache-types/prepared_query.go index 8a9ec7720959c..995214a1b45b4 100644 --- a/agent/cache-types/prepared_query.go +++ b/agent/cache-types/prepared_query.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/prepared_query_test.go b/agent/cache-types/prepared_query_test.go index 50850c20fe9bc..26ea4d4c0b028 100644 --- a/agent/cache-types/prepared_query_test.go +++ b/agent/cache-types/prepared_query_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/resolved_service_config.go b/agent/cache-types/resolved_service_config.go index 76c333840f5d3..589afbcc6bd1b 100644 --- a/agent/cache-types/resolved_service_config.go +++ b/agent/cache-types/resolved_service_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/resolved_service_config_test.go b/agent/cache-types/resolved_service_config_test.go index a71cdb7834362..4c8376447ad5f 100644 --- a/agent/cache-types/resolved_service_config_test.go +++ b/agent/cache-types/resolved_service_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/rpc.go b/agent/cache-types/rpc.go index 13bfdb3e5a5d7..905547d20fe82 100644 --- a/agent/cache-types/rpc.go +++ b/agent/cache-types/rpc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_checks.go b/agent/cache-types/service_checks.go index 55ea3896f33cb..88a065c94b865 100644 --- a/agent/cache-types/service_checks.go +++ b/agent/cache-types/service_checks.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_checks_test.go b/agent/cache-types/service_checks_test.go index 898ea4aa9c937..b936990d91a33 100644 --- a/agent/cache-types/service_checks_test.go +++ b/agent/cache-types/service_checks_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_dump.go b/agent/cache-types/service_dump.go index 60c2895aff1f5..3bab11239f04b 100644 --- a/agent/cache-types/service_dump.go +++ b/agent/cache-types/service_dump.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_dump_test.go b/agent/cache-types/service_dump_test.go index 3570fc9720a16..8fe39e63b268e 100644 --- a/agent/cache-types/service_dump_test.go +++ b/agent/cache-types/service_dump_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_gateways.go b/agent/cache-types/service_gateways.go index a080fc77451ce..d096d136fa2fb 100644 --- a/agent/cache-types/service_gateways.go +++ b/agent/cache-types/service_gateways.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/service_gateways_test.go b/agent/cache-types/service_gateways_test.go index 9f615162b6fb3..c8c62e7c9ad5f 100644 --- a/agent/cache-types/service_gateways_test.go +++ b/agent/cache-types/service_gateways_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/testing.go b/agent/cache-types/testing.go index 3789eff4e2a5e..459feaba9fa37 100644 --- a/agent/cache-types/testing.go +++ b/agent/cache-types/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/trust_bundle.go b/agent/cache-types/trust_bundle.go index 0bac27e2b8c23..301b18977d95c 100644 --- a/agent/cache-types/trust_bundle.go +++ b/agent/cache-types/trust_bundle.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/trust_bundle_test.go b/agent/cache-types/trust_bundle_test.go index f39a15bdc6622..dc39c3555bd51 100644 --- a/agent/cache-types/trust_bundle_test.go +++ b/agent/cache-types/trust_bundle_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/trust_bundles.go b/agent/cache-types/trust_bundles.go index 7098c01af47ad..a485ee53414c0 100644 --- a/agent/cache-types/trust_bundles.go +++ b/agent/cache-types/trust_bundles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache-types/trust_bundles_test.go b/agent/cache-types/trust_bundles_test.go index f565bab18fdbf..373ba2a8d71f0 100644 --- a/agent/cache-types/trust_bundles_test.go +++ b/agent/cache-types/trust_bundles_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cachetype diff --git a/agent/cache/cache.go b/agent/cache/cache.go index c78a3baaf0901..ed1e4f911ada0 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package cache provides caching features for data from a Consul server. // @@ -32,7 +32,6 @@ import ( "golang.org/x/time/rate" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/cacheshim" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/ttlcache" ) @@ -173,7 +172,32 @@ type typeEntry struct { // ResultMeta is returned from Get calls along with the value and can be used // to expose information about the cache status for debugging or testing. -type ResultMeta = cacheshim.ResultMeta +type ResultMeta struct { + // Hit indicates whether or not the request was a cache hit + Hit bool + + // Age identifies how "stale" the result is. 
It's semantics differ based on + // whether or not the cache type performs background refresh or not as defined + // in https://www.consul.io/api/index.html#agent-caching. + // + // For background refresh types, Age is 0 unless the background blocking query + // is currently in a failed state and so not keeping up with the server's + // values. If it is non-zero it represents the time since the first failure to + // connect during background refresh, and is reset after a background request + // does manage to reconnect and either return successfully, or block for at + // least the yamux keepalive timeout of 30 seconds (which indicates the + // connection is OK but blocked as expected). + // + // For simple cache types, Age is the time since the result being returned was + // fetched from the servers. + Age time.Duration + + // Index is the internal ModifyIndex for the cache entry. Not all types + // support blocking and all that do will likely have this in their result type + // already but this allows generic code to reason about whether cache values + // have changed. + Index uint64 +} // Options are options for the Cache. type Options struct { diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 6a4216c85929a..4ab66a29d0bf0 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 9ee1fc0007fa9..fb8008d8c15ee 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache diff --git a/agent/cache/request.go b/agent/cache/request.go index 92f5b6e1ffa68..7f66f4ce58819 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache import ( - "github.com/hashicorp/consul/agent/cacheshim" + "time" ) // Request is a cacheable request. @@ -13,7 +13,10 @@ import ( // the agent/structs package. // //go:generate mockery --name Request --inpackage -type Request = cacheshim.Request +type Request interface { + // CacheInfo returns information used for caching this request. + CacheInfo() RequestInfo +} // RequestInfo represents cache information for a request. The caching // framework uses this to control the behavior of caching and to determine @@ -21,4 +24,53 @@ type Request = cacheshim.Request // // TODO(peering): finish ensuring everything that sets a Datacenter sets or doesn't set PeerName. // TODO(peering): also make sure the peer name is present in the cache key likely in lieu of the datacenter somehow. -type RequestInfo = cacheshim.RequestInfo +type RequestInfo struct { + // Key is a unique cache key for this request. This key should + // be globally unique to identify this request, since any conflicting + // cache keys could result in invalid data being returned from the cache. + // The Key does not need to include ACL or DC information, since the + // cache already partitions by these values prior to using this key. + Key string + + // Token is the ACL token associated with this request. + // + // Datacenter is the datacenter that the request is targeting. + // + // PeerName is the peer that the request is targeting. + // + // All of these values are used to partition the cache. 
The cache framework + // today partitions data on these values to simplify behavior: by + // partitioning ACL tokens, the cache doesn't need to be smart about + // filtering results. By filtering datacenter/peer results, the cache can + // service the multi-DC/multi-peer nature of Consul. This comes at the expense of + // working set size, but in general the effect is minimal. + Token string + Datacenter string + PeerName string + + // MinIndex is the minimum index being queried. This is used to + // determine if we already have data satisfying the query or if we need + // to block until new data is available. If no index is available, the + // default value (zero) is acceptable. + MinIndex uint64 + + // Timeout is the timeout for waiting on a blocking query. When the + // timeout is reached, the last known value is returned (or maybe nil + // if there was no prior value). This "last known value" behavior matches + // normal Consul blocking queries. + Timeout time.Duration + + // MaxAge if set limits how stale a cache entry can be. If it is non-zero and + // there is an entry in cache that is older than specified, it is treated as a + // cache miss and re-fetched. It is ignored for cachetypes with Refresh = + // true. + MaxAge time.Duration + + // MustRevalidate forces a new lookup of the cache even if there is an + // existing one that has not expired. It is implied by HTTP requests with + // `Cache-Control: max-age=0` but we can't distinguish that case from the + // unset case for MaxAge. Later we may support revalidating the index without + // a full re-fetch but for now the only option is to refetch. It is ignored + // for cachetypes with Refresh = true. + MustRevalidate bool +} diff --git a/agent/cache/testing.go b/agent/cache/testing.go index b754dae3e76a2..7f0df113bc839 100644 --- a/agent/cache/testing.go +++ b/agent/cache/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache diff --git a/agent/cache/type.go b/agent/cache/type.go index ccab3216ca837..d58362fd470d7 100644 --- a/agent/cache/type.go +++ b/agent/cache/type.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache diff --git a/agent/cache/watch.go b/agent/cache/watch.go index 111ac85acb3ef..d8693ad032f99 100644 --- a/agent/cache/watch.go +++ b/agent/cache/watch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache @@ -9,17 +9,26 @@ import ( "reflect" "time" - "google.golang.org/protobuf/proto" - - "github.com/hashicorp/consul/agent/cacheshim" "github.com/hashicorp/consul/lib" + "google.golang.org/protobuf/proto" ) // UpdateEvent is a struct summarizing an update to a cache entry -type UpdateEvent = cacheshim.UpdateEvent +type UpdateEvent struct { + // CorrelationID is used by the Notify API to allow correlation of updates + // with specific requests. We could return the full request object and + // cachetype for consumers to match against the calls they made but in + // practice it's cleaner for them to choose the minimal necessary unique + // identifier given the set of things they are watching. They might even + // choose to assign random IDs for example. + CorrelationID string + Result interface{} + Meta ResultMeta + Err error +} // Callback is the function type accepted by NotifyCallback. 
-type Callback = cacheshim.Callback +type Callback func(ctx context.Context, event UpdateEvent) // Notify registers a desire to be updated about changes to a cache result. // @@ -117,7 +126,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlati // Check the index of the value returned in the cache entry to be sure it // changed if index == 0 || index < meta.Index { - cb(ctx, UpdateEvent{CorrelationID: correlationID, Result: res, Meta: meta, Err: err}) + cb(ctx, UpdateEvent{correlationID, res, meta, err}) // Update index for next request index = meta.Index @@ -177,7 +186,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlatio // Check for a change in the value or an index change if index < meta.Index || !isEqual(lastValue, res) { - cb(ctx, UpdateEvent{CorrelationID: correlationID, Result: res, Meta: meta, Err: err}) + cb(ctx, UpdateEvent{correlationID, res, meta, err}) // Update index and lastValue lastValue = res diff --git a/agent/cache/watch_test.go b/agent/cache/watch_test.go index 41c30f4dbb5cf..e6a5848f4ccdd 100644 --- a/agent/cache/watch_test.go +++ b/agent/cache/watch_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package cache diff --git a/agent/cacheshim/cache.go b/agent/cacheshim/cache.go deleted file mode 100644 index 64754da644865..0000000000000 --- a/agent/cacheshim/cache.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package cacheshim - -import ( - "context" - "time" -) - -// cacheshim defines any shared cache types for any packages that don't want to have a dependency on the agent cache. -// This was created as part of a refactor to remove agent/leafcert package's dependency on agent/cache. - -type ResultMeta struct { - // Hit indicates whether or not the request was a cache hit - Hit bool - - // Age identifies how "stale" the result is. It's semantics differ based on - // whether or not the cache type performs background refresh or not as defined - // in https://www.consul.io/api/index.html#agent-caching. - // - // For background refresh types, Age is 0 unless the background blocking query - // is currently in a failed state and so not keeping up with the server's - // values. If it is non-zero it represents the time since the first failure to - // connect during background refresh, and is reset after a background request - // does manage to reconnect and either return successfully, or block for at - // least the yamux keepalive timeout of 30 seconds (which indicates the - // connection is OK but blocked as expected). - // - // For simple cache types, Age is the time since the result being returned was - // fetched from the servers. - Age time.Duration - - // Index is the internal ModifyIndex for the cache entry. Not all types - // support blocking and all that do will likely have this in their result type - // already but this allows generic code to reason about whether cache values - // have changed. - Index uint64 -} - -type Request interface { - // CacheInfo returns information used for caching this request. - CacheInfo() RequestInfo -} - -type RequestInfo struct { - // Key is a unique cache key for this request. This key should - // be globally unique to identify this request, since any conflicting - // cache keys could result in invalid data being returned from the cache. 
- // The Key does not need to include ACL or DC information, since the - // cache already partitions by these values prior to using this key. - Key string - - // Token is the ACL token associated with this request. - // - // Datacenter is the datacenter that the request is targeting. - // - // PeerName is the peer that the request is targeting. - // - // All of these values are used to partition the cache. The cache framework - // today partitions data on these values to simplify behavior: by - // partitioning ACL tokens, the cache doesn't need to be smart about - // filtering results. By filtering datacenter/peer results, the cache can - // service the multi-DC/multi-peer nature of Consul. This comes at the expense of - // working set size, but in general the effect is minimal. - Token string - Datacenter string - PeerName string - - // MinIndex is the minimum index being queried. This is used to - // determine if we already have data satisfying the query or if we need - // to block until new data is available. If no index is available, the - // default value (zero) is acceptable. - MinIndex uint64 - - // Timeout is the timeout for waiting on a blocking query. When the - // timeout is reached, the last known value is returned (or maybe nil - // if there was no prior value). This "last known value" behavior matches - // normal Consul blocking queries. - Timeout time.Duration - - // MaxAge if set limits how stale a cache entry can be. If it is non-zero and - // there is an entry in cache that is older than specified, it is treated as a - // cache miss and re-fetched. It is ignored for cachetypes with Refresh = - // true. - MaxAge time.Duration - - // MustRevalidate forces a new lookup of the cache even if there is an - // existing one that has not expired. It is implied by HTTP requests with - // `Cache-Control: max-age=0` but we can't distinguish that case from the - // unset case for MaxAge. Later we may support revalidating the index without - // a full re-fetch but for now the only option is to refetch. It is ignored - // for cachetypes with Refresh = true. - MustRevalidate bool -} - -type UpdateEvent struct { - // CorrelationID is used by the Notify API to allow correlation of updates - // with specific requests. We could return the full request object and - // cachetype for consumers to match against the calls they made but in - // practice it's cleaner for them to choose the minimal necessary unique - // identifier given the set of things they are watching. They might even - // choose to assign random IDs for example. - CorrelationID string - Result interface{} - Meta ResultMeta - Err error -} - -type Callback func(ctx context.Context, event UpdateEvent) - -type Cache interface { - Get(ctx context.Context, t string, r Request) (interface{}, ResultMeta, error) - NotifyCallback(ctx context.Context, t string, r Request, correlationID string, cb Callback) error - Notify(ctx context.Context, t string, r Request, correlationID string, ch chan<- UpdateEvent) error -} - -const ConnectCARootName = "connect-ca-root" diff --git a/agent/catalog_endpoint.go b/agent/catalog_endpoint.go index 8af4654b90f2a..ad72c4b47f353 100644 --- a/agent/catalog_endpoint.go +++ b/agent/catalog_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -13,7 +13,6 @@ import ( cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/dnsutil" ) var CatalogCounters = []prometheus.CounterDefinition{ @@ -258,7 +257,7 @@ RETRY_ONCE: } out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() - s.agent.TranslateAddresses(args.Datacenter, out.Nodes, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(args.Datacenter, out.Nodes, TranslateAddressAcceptAny) // Use empty list instead of nil if out.Nodes == nil { @@ -404,7 +403,7 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R } out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() - s.agent.TranslateAddresses(args.Datacenter, out.ServiceNodes, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(args.Datacenter, out.ServiceNodes, TranslateAddressAcceptAny) // Use empty list instead of nil if out.ServiceNodes == nil { @@ -458,7 +457,7 @@ RETRY_ONCE: } out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() if out.NodeServices != nil { - s.agent.TranslateAddresses(args.Datacenter, out.NodeServices, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(args.Datacenter, out.NodeServices, TranslateAddressAcceptAny) } // TODO: The NodeServices object in IndexedNodeServices is a pointer to @@ -522,7 +521,7 @@ RETRY_ONCE: goto RETRY_ONCE } out.ConsistencyLevel = args.QueryOptions.ConsistencyLevel() - s.agent.TranslateAddresses(args.Datacenter, &out.NodeServices, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(args.Datacenter, &out.NodeServices, TranslateAddressAcceptAny) // Use empty list instead of nil for _, s := range out.NodeServices.Services { diff --git a/agent/catalog_endpoint_ce.go b/agent/catalog_endpoint_ce.go index 68433e2196d7c..fcd8311356d87 100644 --- a/agent/catalog_endpoint_ce.go +++ b/agent/catalog_endpoint_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 10b1c8b887b5e..da65097dbb581 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -1,73 +1,29 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent import ( "context" "fmt" - "io" "net/http" "net/http/httptest" "net/url" - "strings" "testing" "time" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" ) -func TestCatalogEndpointsFailInV2(t *testing.T) { - t.Parallel() - - a := NewTestAgent(t, `experiments = ["resource-apis"]`) - - checkRequest := func(method, url string) { - t.Run(method+" "+url, func(t *testing.T) { - assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") - }) - } - - checkRequest("PUT", "/v1/catalog/register") - checkRequest("GET", "/v1/catalog/connect/") - checkRequest("PUT", "/v1/catalog/deregister") - checkRequest("GET", "/v1/catalog/datacenters") - checkRequest("GET", "/v1/catalog/nodes") - checkRequest("GET", "/v1/catalog/services") - checkRequest("GET", "/v1/catalog/service/") - checkRequest("GET", "/v1/catalog/node/") - checkRequest("GET", "/v1/catalog/node-services/") - checkRequest("GET", "/v1/catalog/gateway-services/") -} - -func assertV1CatalogEndpointDoesNotWorkWithV2(t *testing.T, a *TestAgent, method, url string, requestBody string) { - var body io.Reader - switch method { - case http.MethodPost, http.MethodPut: - body = strings.NewReader(requestBody + "\n") - } - - req, err := http.NewRequest(method, url, body) - require.NoError(t, err) - - resp := httptest.NewRecorder() - a.srv.h.ServeHTTP(resp, req) - require.Equal(t, http.StatusBadRequest, resp.Code) - - got, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - require.Contains(t, string(got), structs.ErrUsingV2CatalogExperiment.Error()) -} - func TestCatalogRegister_PeeringRegistration(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -1167,7 +1123,7 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) { r.Fatalf("err: %v", err) } - assertIndex(r, resp) + assertIndex(t, resp) nodes = obj.(structs.ServiceNodes) if len(nodes) != 2 { r.Fatalf("bad: %v", obj) diff --git a/agent/check.go b/agent/check.go index 078361be66010..79c030d932424 100644 --- a/agent/check.go +++ b/agent/check.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/checks/alias.go b/agent/checks/alias.go index f75c05b9580bb..5e394105cf1c9 100644 --- a/agent/checks/alias.go +++ b/agent/checks/alias.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/alias_test.go b/agent/checks/alias_test.go index 1f5662019929a..70a301d1180ff 100644 --- a/agent/checks/alias_test.go +++ b/agent/checks/alias_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/check.go b/agent/checks/check.go index cabecddcf5a21..c6472f1fb97e5 100644 --- a/agent/checks/check.go +++ b/agent/checks/check.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/check_test.go b/agent/checks/check_test.go index ae53b477f5554..389b4cb14100a 100644 --- a/agent/checks/check_test.go +++ b/agent/checks/check_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/check_windows_test.go b/agent/checks/check_windows_test.go index 2f6d40f243648..b7c14dd18e853 100644 --- a/agent/checks/check_windows_test.go +++ b/agent/checks/check_windows_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build windows +// +build windows package checks diff --git a/agent/checks/docker.go b/agent/checks/docker.go index e3483e073b037..11bcac7e01c84 100644 --- a/agent/checks/docker.go +++ b/agent/checks/docker.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/docker_unix.go b/agent/checks/docker_unix.go index 6d169f2bbc3cd..33c8a2b817223 100644 --- a/agent/checks/docker_unix.go +++ b/agent/checks/docker_unix.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !windows +// +build !windows package checks diff --git a/agent/checks/docker_windows.go b/agent/checks/docker_windows.go index 6008b695ba1b0..edcb4f380a988 100644 --- a/agent/checks/docker_windows.go +++ b/agent/checks/docker_windows.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/grpc.go b/agent/checks/grpc.go index b3bcba20b5a61..87378521c9dfa 100644 --- a/agent/checks/grpc.go +++ b/agent/checks/grpc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/grpc_test.go b/agent/checks/grpc_test.go index e67b453bda62a..4500bcd67f3b5 100644 --- a/agent/checks/grpc_test.go +++ b/agent/checks/grpc_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/os_service.go b/agent/checks/os_service.go index 3350c73a2c3b4..af4e9b03ee873 100644 --- a/agent/checks/os_service.go +++ b/agent/checks/os_service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package checks diff --git a/agent/checks/os_service_unix.go b/agent/checks/os_service_unix.go index 4c61ac0babc7e..ab004e29fd9c3 100644 --- a/agent/checks/os_service_unix.go +++ b/agent/checks/os_service_unix.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !windows +// +build !windows package checks diff --git a/agent/checks/os_service_windows.go b/agent/checks/os_service_windows.go index f142ea698ec5c..8b73ce4ad2091 100644 --- a/agent/checks/os_service_windows.go +++ b/agent/checks/os_service_windows.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build windows +// +build windows package checks diff --git a/agent/config/agent_limits.go b/agent/config/agent_limits.go index 7abbb075d3166..fff5e267f203c 100644 --- a/agent/config/agent_limits.go +++ b/agent/config/agent_limits.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/builder.go b/agent/config/builder.go index 883be2b74836a..089da4bedd47b 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config import ( - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -35,11 +34,11 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" consulrate "github.com/hashicorp/consul/agent/consul/rate" + "github.com/hashicorp/consul/agent/dns" hcpconfig "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/internal/dnsutil" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/stringslice" @@ -317,10 +316,8 @@ func formatFromFileExtension(name string) string { type byName []os.FileInfo -func (a byName) Len() int { return len(a) } - -func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } // build constructs the runtime configuration from the config sources @@ -885,7 +882,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) { ACLAgentRecoveryToken: stringVal(c.ACL.Tokens.AgentRecovery), ACLReplicationToken: stringVal(c.ACL.Tokens.Replication), ACLConfigFileRegistrationToken: stringVal(c.ACL.Tokens.ConfigFileRegistration), - ACLDNSToken: stringVal(c.ACL.Tokens.DNS), }, // Autopilot @@ -1001,7 +997,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) { DataDir: dataDir, Datacenter: datacenter, DefaultQueryTime: b.durationVal("default_query_time", c.DefaultQueryTime), - DefaultIntentionPolicy: stringVal(c.DefaultIntentionPolicy), DevMode: boolVal(b.opts.DevMode), DisableAnonymousSignature: boolVal(c.DisableAnonymousSignature), DisableCoordinates: boolVal(c.DisableCoordinates), @@ -1114,8 +1109,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) { LocalProxyConfigResyncInterval: 30 * time.Second, } - // host metrics are enabled by default to support HashiCorp Cloud Platform integration - rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, true) + // host metrics are enabled by default if consul is configured with HashiCorp Cloud Platform integration + rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, rt.IsCloudEnabled()) rt.TLS, err = b.buildTLSConfig(rt, c.TLS) if err != nil { @@ -1141,23 +1136,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) { return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_rate must be strictly positive, was: %v", rt.Cache.EntryFetchRate) } - // TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP - if 
stringslice.Contains(rt.Experiments, consul.CatalogResourceExperimentName) && rt.IsCloudEnabled() { - // Allow override of this check for development/testing purposes. Should not be used in production - if !stringslice.Contains(rt.Experiments, consul.HCPAllowV2ResourceAPIs) { - return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include 'resource-apis' when HCP `cloud` configuration is set") - } - } - - // For now, disallow usage of several v2 experiments in secondary datacenters. - if rt.ServerMode && rt.PrimaryDatacenter != rt.Datacenter { - for _, name := range rt.Experiments { - if !consul.IsExperimentAllowedOnSecondaries(name) { - return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include `%s` for servers in secondary datacenters", name) - } - } - } - if rt.UIConfig.MetricsProvider == "prometheus" { // Handle defaulting for the built-in version of prometheus. if len(rt.UIConfig.MetricsProxy.PathAllowlist) == 0 { @@ -1300,7 +1278,7 @@ func (b *builder) validate(rt RuntimeConfig) error { switch { case rt.NodeName == "": return fmt.Errorf("node_name cannot be empty") - case dnsutil.InvalidNameRe.MatchString(rt.NodeName): + case dns.InvalidNameRe.MatchString(rt.NodeName): b.warn("Node name %q will not be discoverable "+ "via DNS due to invalid characters. Valid characters include "+ "all alpha-numerics and dashes.", rt.NodeName) @@ -1308,16 +1286,12 @@ func (b *builder) validate(rt RuntimeConfig) error { // todo(kyhavlov): Add stronger validation here for node names. b.warn("Found invalid characters in node name %q - whitespace and quotes "+ "(', \", `) cannot be used with auto-config.", rt.NodeName) - case len(rt.NodeName) > dnsutil.MaxLabelLength: + case len(rt.NodeName) > dns.MaxLabelLength: b.warn("Node name %q will not be discoverable "+ "via DNS due to it being too long. 
Valid lengths are between "+ "1 and 63 bytes.", rt.NodeName) } - if err := rt.StructLocality().Validate(); err != nil { - return fmt.Errorf("locality is invalid: %s", err) - } - if ipaddr.IsAny(rt.AdvertiseAddrLAN.IP) { return fmt.Errorf("Advertise address cannot be 0.0.0.0, :: or [::]") } @@ -1428,7 +1402,7 @@ func (b *builder) validate(rt RuntimeConfig) error { // Raft LogStore validation if rt.RaftLogStoreConfig.Backend != consul.LogStoreBackendBoltDB && - rt.RaftLogStoreConfig.Backend != consul.LogStoreBackendWAL && rt.RaftLogStoreConfig.Backend != consul.LogStoreBackendDefault { + rt.RaftLogStoreConfig.Backend != consul.LogStoreBackendWAL { return fmt.Errorf("raft_logstore.backend must be one of '%s' or '%s'", consul.LogStoreBackendBoltDB, consul.LogStoreBackendWAL) } @@ -1497,7 +1471,7 @@ func (b *builder) validate(rt RuntimeConfig) error { return err } case structs.VaultCAProvider: - if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig, rt.PrimaryDatacenter == rt.Datacenter); err != nil { + if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig); err != nil { return err } case structs.AWSCAProvider: @@ -1744,21 +1718,10 @@ func (b *builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition { Checks: checks, Proxy: b.serviceProxyVal(v.Proxy), Connect: b.serviceConnectVal(v.Connect), - Locality: b.serviceLocalityVal(v.Locality), EnterpriseMeta: v.EnterpriseMeta.ToStructs(), } } -func (b *builder) serviceLocalityVal(l *Locality) *structs.Locality { - if l == nil { - return nil - } - return &structs.Locality{ - Region: stringVal(l.Region), - Zone: stringVal(l.Zone), - } -} - func (b *builder) serviceKindVal(v *string) structs.ServiceKind { if v == nil { return structs.ServiceKindTypical @@ -1840,14 +1803,14 @@ func (b *builder) meshGatewayConfVal(mgConf *MeshGatewayConfig) structs.MeshGate return cfg } -func (b *builder) dnsRecursorStrategyVal(v string) structs.RecursorStrategy { - var out structs.RecursorStrategy +func (b *builder) dnsRecursorStrategyVal(v string) dns.RecursorStrategy { + var out dns.RecursorStrategy - switch structs.RecursorStrategy(v) { - case structs.RecursorStrategyRandom: - out = structs.RecursorStrategyRandom - case structs.RecursorStrategySequential, "": - out = structs.RecursorStrategySequential + switch dns.RecursorStrategy(v) { + case dns.RecursorStrategyRandom: + out = dns.RecursorStrategyRandom + case dns.RecursorStrategySequential, "": + out = dns.RecursorStrategySequential default: b.err = multierror.Append(b.err, fmt.Errorf("dns_config.recursor_strategy: invalid strategy: %q", v)) } @@ -2582,38 +2545,10 @@ func validateAutoConfigAuthorizer(rt RuntimeConfig) error { } func (b *builder) cloudConfigVal(v Config) hcpconfig.CloudConfig { - // Load the same environment variables expected by hcp-sdk-go - envHostname, ok := os.LookupEnv("HCP_API_ADDRESS") - if !ok { - if legacyEnvHostname, ok := os.LookupEnv("HCP_API_HOST"); ok { - // Remove only https scheme prefixes from the deprecated environment - // variable for specifying the API host. Mirrors the same behavior as - // hcp-sdk-go. 
- if strings.HasPrefix(strings.ToLower(legacyEnvHostname), "https://") { - legacyEnvHostname = legacyEnvHostname[8:] - } - envHostname = legacyEnvHostname - } - } - - var envTLSConfig *tls.Config - if os.Getenv("HCP_AUTH_TLS") == "insecure" || - os.Getenv("HCP_SCADA_TLS") == "insecure" || - os.Getenv("HCP_API_TLS") == "insecure" { - envTLSConfig = &tls.Config{InsecureSkipVerify: true} - } - val := hcpconfig.CloudConfig{ - ResourceID: os.Getenv("HCP_RESOURCE_ID"), - ClientID: os.Getenv("HCP_CLIENT_ID"), - ClientSecret: os.Getenv("HCP_CLIENT_SECRET"), - AuthURL: os.Getenv("HCP_AUTH_URL"), - Hostname: envHostname, - ScadaAddress: os.Getenv("HCP_SCADA_ADDRESS"), - TLSConfig: envTLSConfig, + ResourceID: os.Getenv("HCP_RESOURCE_ID"), } - - // Node id might get overridden in setup.go:142 + // Node id might get overriden in setup.go:142 nodeID := stringVal(v.NodeID) val.NodeID = types.NodeID(nodeID) val.NodeName = b.nodeName(v.NodeName) @@ -2622,31 +2557,15 @@ func (b *builder) cloudConfigVal(v Config) hcpconfig.CloudConfig { return val } - // Load configuration file variables for anything not set by environment variables - if val.AuthURL == "" { - val.AuthURL = stringVal(v.Cloud.AuthURL) - } - - if val.Hostname == "" { - val.Hostname = stringVal(v.Cloud.Hostname) - } + val.ClientID = stringVal(v.Cloud.ClientID) + val.ClientSecret = stringVal(v.Cloud.ClientSecret) + val.AuthURL = stringVal(v.Cloud.AuthURL) + val.Hostname = stringVal(v.Cloud.Hostname) + val.ScadaAddress = stringVal(v.Cloud.ScadaAddress) - if val.ScadaAddress == "" { - val.ScadaAddress = stringVal(v.Cloud.ScadaAddress) + if resourceID := stringVal(v.Cloud.ResourceID); resourceID != "" { + val.ResourceID = resourceID } - - if val.ResourceID == "" { - val.ResourceID = stringVal(v.Cloud.ResourceID) - } - - if val.ClientID == "" { - val.ClientID = stringVal(v.Cloud.ClientID) - } - - if val.ClientSecret == "" { - val.ClientSecret = stringVal(v.Cloud.ClientSecret) - } - return val } @@ -2733,10 +2652,10 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, errors.New("verify_outgoing is not valid in the tls.grpc stanza") } - // Similarly, only the internal RPC and defaults configuration honor VerifyServerHostname + // Similarly, only the internal RPC configuration honors VerifyServerHostname // so we call it out here too. - if t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { - return c, errors.New("verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanzas") + if t.Defaults.VerifyServerHostname != nil || t.GRPC.VerifyServerHostname != nil || t.HTTPS.VerifyServerHostname != nil { + return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza") } // And UseAutoCert right now only applies to external gRPC interface. @@ -2786,11 +2705,8 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error } mapCommon("internal_rpc", t.InternalRPC, &c.InternalRPC) + c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) - c.InternalRPC.VerifyServerHostname = boolVal(t.Defaults.VerifyServerHostname) - if t.InternalRPC.VerifyServerHostname != nil { - c.InternalRPC.VerifyServerHostname = boolVal(t.InternalRPC.VerifyServerHostname) - } // Setting only verify_server_hostname is documented to imply verify_outgoing. // If it doesn't then we risk sending communication over plain TCP when we // documented it as forcing TLS for RPCs. 
Enforce this here rather than in @@ -2851,7 +2767,7 @@ func (b *builder) parsePrefixFilter(telemetry *Telemetry) ([]string, []string) { func (b *builder) raftLogStoreConfigVal(raw *RaftLogStoreRaw) consul.RaftLogStoreConfig { var cfg consul.RaftLogStoreConfig if raw != nil { - cfg.Backend = stringValWithDefault(raw.Backend, consul.LogStoreBackendDefault) + cfg.Backend = stringValWithDefault(raw.Backend, consul.LogStoreBackendBoltDB) cfg.DisableLogCache = boolVal(raw.DisableLogCache) cfg.Verification.Enabled = boolVal(raw.Verification.Enabled) diff --git a/agent/config/builder_ce.go b/agent/config/builder_ce.go index 068e02f0ce8b1..dae1e275c96a6 100644 --- a/agent/config/builder_ce.go +++ b/agent/config/builder_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/builder_ce_test.go b/agent/config/builder_ce_test.go index 9c3114ddc18ea..100f905859da6 100644 --- a/agent/config/builder_ce_test.go +++ b/agent/config/builder_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index b1a8ad8c5acba..3eb81fdee4de6 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - hcpconfig "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/types" ) @@ -576,240 +575,3 @@ func TestBuidler_hostMetricsWithCloud(t *testing.T) { require.NotNil(t, cfg) require.True(t, cfg.Telemetry.EnableHostMetrics) } - -func TestBuilder_CheckExperimentsInSecondaryDatacenters(t *testing.T) { - - type testcase struct { - hcl string - expectErr bool - } - - run := func(t *testing.T, tc testcase) { - // using dev mode skips the need for a data dir - devMode := true - builderOpts := LoadOpts{ - DevMode: &devMode, - Overrides: []Source{ - FileSource{ - Name: "overrides", - Format: "hcl", - Data: tc.hcl, - }, - }, - } - _, err := Load(builderOpts) - if tc.expectErr { - require.Error(t, err) - require.Contains(t, err.Error(), "`experiments` cannot include") - } else { - require.NoError(t, err) - } - } - - const ( - primary = `server = true primary_datacenter = "dc1" datacenter = "dc1" ` - secondary = `server = true primary_datacenter = "dc1" datacenter = "dc2" ` - ) - - cases := map[string]testcase{ - "primary server no experiments": { - hcl: primary + `experiments = []`, - }, - "primary server v2catalog": { - hcl: primary + `experiments = ["resource-apis"]`, - }, - "primary server v2dns": { - hcl: primary + `experiments = ["v2dns"]`, - }, - "primary server v2tenancy": { - hcl: primary + `experiments = ["v2tenancy"]`, - }, - "secondary server no experiments": { - hcl: secondary + `experiments = []`, - }, - "secondary server v2catalog": { - hcl: secondary + `experiments = ["resource-apis"]`, - expectErr: true, - }, - "secondary server v2dns": { - hcl: secondary + `experiments = ["v2dns"]`, - expectErr: true, - }, - "secondary server v2tenancy": { - hcl: secondary + `experiments = ["v2tenancy"]`, - expectErr: true, - }, - } - - for name, tc := range cases { - t.Run(name, func(t 
*testing.T) { - run(t, tc) - }) - } -} - -func TestBuilder_WarnCloudConfigWithResourceApis(t *testing.T) { - tests := []struct { - name string - hcl string - expectErr bool - }{ - { - name: "base_case", - hcl: ``, - }, - { - name: "resource-apis_no_cloud", - hcl: `experiments = ["resource-apis"]`, - }, - { - name: "cloud-config_no_experiments", - hcl: `cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, - }, - { - name: "cloud-config_resource-apis_experiment", - hcl: ` - experiments = ["resource-apis"] - cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, - expectErr: true, - }, - { - name: "cloud-config_other_experiment", - hcl: ` - experiments = ["test"] - cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, - }, - { - name: "cloud-config_resource-apis_experiment_override", - hcl: ` - experiments = ["resource-apis", "hcp-v2-resource-apis"] - cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, - }, - } - for _, tc := range tests { - // using dev mode skips the need for a data dir - devMode := true - builderOpts := LoadOpts{ - DevMode: &devMode, - Overrides: []Source{ - FileSource{ - Name: "overrides", - Format: "hcl", - Data: tc.hcl, - }, - }, - } - _, err := Load(builderOpts) - if tc.expectErr { - require.Error(t, err) - require.Contains(t, err.Error(), "cannot include 'resource-apis' when HCP") - } else { - require.NoError(t, err) - } - } -} - -func TestBuilder_CloudConfigWithEnvironmentVars(t *testing.T) { - tests := map[string]struct { - hcl string - env map[string]string - expected hcpconfig.CloudConfig - }{ - "ConfigurationOnly": { - hcl: `cloud{ resource_id = "config-resource-id" client_id = "config-client-id" - client_secret = "config-client-secret" auth_url = "auth.config.com" - hostname = "api.config.com" scada_address = "scada.config.com"}`, - expected: hcpconfig.CloudConfig{ - ResourceID: "config-resource-id", - ClientID: "config-client-id", - ClientSecret: "config-client-secret", - AuthURL: "auth.config.com", - Hostname: "api.config.com", - ScadaAddress: "scada.config.com", - }, - }, - "EnvVarsOnly": { - env: map[string]string{ - "HCP_RESOURCE_ID": "env-resource-id", - "HCP_CLIENT_ID": "env-client-id", - "HCP_CLIENT_SECRET": "env-client-secret", - "HCP_AUTH_URL": "auth.env.com", - "HCP_API_ADDRESS": "api.env.com", - "HCP_SCADA_ADDRESS": "scada.env.com", - }, - expected: hcpconfig.CloudConfig{ - ResourceID: "env-resource-id", - ClientID: "env-client-id", - ClientSecret: "env-client-secret", - AuthURL: "auth.env.com", - Hostname: "api.env.com", - ScadaAddress: "scada.env.com", - }, - }, - "EnvVarsOverrideConfig": { - hcl: `cloud{ resource_id = "config-resource-id" client_id = "config-client-id" - client_secret = "config-client-secret" auth_url = "auth.config.com" - hostname = "api.config.com" scada_address = "scada.config.com"}`, - env: map[string]string{ - "HCP_RESOURCE_ID": "env-resource-id", - "HCP_CLIENT_ID": "env-client-id", - "HCP_CLIENT_SECRET": "env-client-secret", - "HCP_AUTH_URL": "auth.env.com", - "HCP_API_ADDRESS": "api.env.com", - "HCP_SCADA_ADDRESS": "scada.env.com", - }, - expected: hcpconfig.CloudConfig{ - ResourceID: "env-resource-id", - ClientID: "env-client-id", - ClientSecret: "env-client-secret", - AuthURL: "auth.env.com", - Hostname: "api.env.com", - ScadaAddress: "scada.env.com", - }, - }, - "Combination": { - hcl: `cloud{ resource_id = "config-resource-id" client_id = "config-client-id" - client_secret = "config-client-secret"}`, - env: map[string]string{ - "HCP_AUTH_URL": 
"auth.env.com", - "HCP_API_ADDRESS": "api.env.com", - "HCP_SCADA_ADDRESS": "scada.env.com", - }, - expected: hcpconfig.CloudConfig{ - ResourceID: "config-resource-id", - ClientID: "config-client-id", - ClientSecret: "config-client-secret", - AuthURL: "auth.env.com", - Hostname: "api.env.com", - ScadaAddress: "scada.env.com", - }, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - for k, v := range tc.env { - t.Setenv(k, v) - } - devMode := true - builderOpts := LoadOpts{ - DevMode: &devMode, - Overrides: []Source{ - FileSource{ - Name: "overrides", - Format: "hcl", - Data: tc.hcl, - }, - }, - } - loaded, err := Load(builderOpts) - require.NoError(t, err) - - nodeName, err := os.Hostname() - require.NoError(t, err) - tc.expected.NodeName = nodeName - - actual := loaded.RuntimeConfig.Cloud - require.Equal(t, tc.expected, actual) - }) - } -} diff --git a/agent/config/config.deepcopy.go b/agent/config/config.deepcopy.go index 2a5ebfce276c7..9e6b3e06ffd77 100644 --- a/agent/config/config.deepcopy.go +++ b/agent/config/config.deepcopy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - // generated by deep-copy -pointer-receiver -o ./config.deepcopy.go -type RuntimeConfig ./; DO NOT EDIT. package config diff --git a/agent/config/config.go b/agent/config/config.go index 013f14dabf1bd..dbd5c2e4fd24f 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -165,7 +165,6 @@ type Config struct { DataDir *string `mapstructure:"data_dir" json:"data_dir,omitempty"` Datacenter *string `mapstructure:"datacenter" json:"datacenter,omitempty"` DefaultQueryTime *string `mapstructure:"default_query_time" json:"default_query_time,omitempty"` - DefaultIntentionPolicy *string `mapstructure:"default_intention_policy" json:"default_intention_policy,omitempty"` DisableAnonymousSignature *bool `mapstructure:"disable_anonymous_signature" json:"disable_anonymous_signature,omitempty"` DisableCoordinates *bool `mapstructure:"disable_coordinates" json:"disable_coordinates,omitempty"` DisableHostNodeID *bool `mapstructure:"disable_host_node_id" json:"disable_host_node_id,omitempty"` @@ -405,7 +404,6 @@ type ServiceDefinition struct { EnableTagOverride *bool `mapstructure:"enable_tag_override"` Proxy *ServiceProxy `mapstructure:"proxy"` Connect *ServiceConnect `mapstructure:"connect"` - Locality *Locality `mapstructure:"locality"` EnterpriseMeta `mapstructure:",squash"` } @@ -782,7 +780,6 @@ type Tokens struct { Default *string `mapstructure:"default"` Agent *string `mapstructure:"agent"` ConfigFileRegistration *string `mapstructure:"config_file_service_registration"` - DNS *string `mapstructure:"dns"` // Enterprise Only ManagedServiceProvider []ServiceProviderToken `mapstructure:"managed_service_provider"` diff --git a/agent/config/config_ce.go b/agent/config/config_ce.go index a60a49be25db5..2fc8da58e6da8 100644 --- a/agent/config/config_ce.go +++ b/agent/config/config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/deep-copy.sh b/agent/config/deep-copy.sh index 159dcf9535d6b..f4a6afaf595ac 100644 --- a/agent/config/deep-copy.sh +++ b/agent/config/deep-copy.sh @@ -2,6 +2,7 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 + readonly PACKAGE_DIR="$(dirname "${BASH_SOURCE[0]}")" cd $PACKAGE_DIR diff --git a/agent/config/default.go b/agent/config/default.go index f07a8bdf46dcf..f4f1141638cef 100644 --- a/agent/config/default.go +++ b/agent/config/default.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -147,6 +147,7 @@ func DefaultSource() Source { raft_snapshot_interval = "` + cfg.RaftConfig.SnapshotInterval.String() + `" raft_trailing_logs = ` + strconv.Itoa(int(cfg.RaftConfig.TrailingLogs)) + ` raft_logstore { + backend = "boltdb" wal { segment_size_mb = 64 } @@ -210,7 +211,9 @@ func DevSource() Source { ports = { grpc = 8502 } - experiments = [] + experiments = [ + "resource-apis" + ] `, } } diff --git a/agent/config/default_ce.go b/agent/config/default_ce.go index 34caf4b52b71a..f91bb9c7d3600 100644 --- a/agent/config/default_ce.go +++ b/agent/config/default_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/deprecated.go b/agent/config/deprecated.go index 921e3329ffa6b..597095f8e2642 100644 --- a/agent/config/deprecated.go +++ b/agent/config/deprecated.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/deprecated_test.go b/agent/config/deprecated_test.go index 8d03e431f7afb..785c9555084f2 100644 --- a/agent/config/deprecated_test.go +++ b/agent/config/deprecated_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/doc.go b/agent/config/doc.go index 5bfc77d902528..4cbc2c41cfdcb 100644 --- a/agent/config/doc.go +++ b/agent/config/doc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package config contains the command line and config file code for the // consul agent. diff --git a/agent/config/file_watcher.go b/agent/config/file_watcher.go index 2afe19b1a659a..c91bb1dd50cc7 100644 --- a/agent/config/file_watcher.go +++ b/agent/config/file_watcher.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/file_watcher_test.go b/agent/config/file_watcher_test.go index f937d1401195a..02b1cd14117be 100644 --- a/agent/config/file_watcher_test.go +++ b/agent/config/file_watcher_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/flags.go b/agent/config/flags.go index b56a162287c0c..21e1ac612a530 100644 --- a/agent/config/flags.go +++ b/agent/config/flags.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/flags_test.go b/agent/config/flags_test.go index a6c9ee23bd4aa..10df0d6d7f005 100644 --- a/agent/config/flags_test.go +++ b/agent/config/flags_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/flagset.go b/agent/config/flagset.go index af1b06d70ce9a..3b2abe6fdf9a2 100644 --- a/agent/config/flagset.go +++ b/agent/config/flagset.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/golden_test.go b/agent/config/golden_test.go index a9ce20d7bd1af..fb4401efbf4d7 100644 --- a/agent/config/golden_test.go +++ b/agent/config/golden_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/limits.go b/agent/config/limits.go index baad156e63c5d..6b5d466ab639a 100644 --- a/agent/config/limits.go +++ b/agent/config/limits.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !windows +// +build !windows package config diff --git a/agent/config/limits_windows.go b/agent/config/limits_windows.go index f0efc27eaf9a0..d9d3499397b56 100644 --- a/agent/config/limits_windows.go +++ b/agent/config/limits_windows.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build windows +// +build windows package config diff --git a/agent/config/merge.go b/agent/config/merge.go index 64c7c1e974964..f40efdaa87793 100644 --- a/agent/config/merge.go +++ b/agent/config/merge.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/merge_test.go b/agent/config/merge_test.go index 9c2e2a1a07363..13e3cbb186ece 100644 --- a/agent/config/merge_test.go +++ b/agent/config/merge_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/ratelimited_file_watcher.go b/agent/config/ratelimited_file_watcher.go index 41f894837035f..33de08cf2b62e 100644 --- a/agent/config/ratelimited_file_watcher.go +++ b/agent/config/ratelimited_file_watcher.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/ratelimited_file_watcher_test.go b/agent/config/ratelimited_file_watcher_test.go index 8e4415aaa8712..d6a43b6be82be 100644 --- a/agent/config/ratelimited_file_watcher_test.go +++ b/agent/config/ratelimited_file_watcher_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 2ac7ea19d9f12..9c8588e1848dd 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/consul" consulrate "github.com/hashicorp/consul/agent/consul/rate" + "github.com/hashicorp/consul/agent/dns" hcpconfig "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" @@ -252,7 +253,7 @@ type RuntimeConfig struct { // client agents try the first server in the list every time. // // hcl: dns_config { recursor_strategy = "(random|sequential)" } - DNSRecursorStrategy structs.RecursorStrategy + DNSRecursorStrategy dns.RecursorStrategy // DNSRecursorTimeout specifies the timeout in seconds // for Consul's internal dns client used for recursion. @@ -272,7 +273,7 @@ type RuntimeConfig struct { // Records returned in the ANSWER section of a DNS response for UDP // responses without EDNS support (limited to 512 bytes). // This parameter is deprecated, if you want to limit the number of - // records returned by A or AAAA questions, please use TestDNS_ServiceLookup_Randomize + // records returned by A or AAAA questions, please use DNSARecordLimit // instead. // // hcl: dns_config { udp_answer_limit = int } @@ -564,15 +565,6 @@ type RuntimeConfig struct { // flag: -data-dir string DataDir string - // DefaultIntentionPolicy is used to define a default intention action for all - // sources and destinations. Possible values are "allow", "deny", or "" (blank). - // For compatibility, falls back to ACLResolverSettings.ACLDefaultPolicy (which - // itself has a default of "allow") if left blank. Future versions of Consul - // will default this field to "deny" to be secure by default. - // - // hcl: default_intention_policy = string - DefaultIntentionPolicy string - // DefaultQueryTime is the amount of time a blocking query will wait before // Consul will force a response. This value can be overridden by the 'wait' // query parameter. diff --git a/agent/config/runtime_ce.go b/agent/config/runtime_ce.go index 4fb54ae079c20..94a6b7fa6a62c 100644 --- a/agent/config/runtime_ce.go +++ b/agent/config/runtime_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/runtime_ce_test.go b/agent/config/runtime_ce_test.go index af6bb1f8babe2..99a2f6789e134 100644 --- a/agent/config/runtime_ce_test.go +++ b/agent/config/runtime_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index bc2222739c0be..1d03ecd41e606 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -69,9 +69,6 @@ var defaultGrpcTlsAddr = net.TCPAddrFromAddrPort(netip.MustParseAddrPort("127.0. // checks for warnings on deprecated fields and flags. 
These tests // should check one option at a time if possible func TestLoad_IntegrationWithFlags(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } dataDir := testutil.TempDir(t, "config") run := func(t *testing.T, tc testCase) { @@ -327,7 +324,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { rt.DevMode = true rt.DisableAnonymousSignature = true rt.DisableKeyringFile = true - rt.Experiments = nil + rt.Experiments = []string{"resource-apis"} rt.EnableDebug = true rt.UIConfig.Enabled = true rt.LeaveOnTerm = false @@ -1041,13 +1038,6 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, }, }) - run(t, testCase{ - desc: "locality invalid", - args: []string{`-data-dir=` + dataDir}, - json: []string{`{"locality": {"zone": "us-west-1a"}}`}, - hcl: []string{`locality { zone = "us-west-1a" }`}, - expectedErr: "locality is invalid: zone cannot be set without region", - }) run(t, testCase{ desc: "client addr and ports == 0", args: []string{`-data-dir=` + dataDir}, @@ -2329,6 +2319,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir rt.Cloud = hcpconfig.CloudConfig{ + // ID is only populated from env if not populated from other sources. ResourceID: "env-id", NodeName: "thehostname", NodeID: "", @@ -2370,7 +2361,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir rt.Cloud = hcpconfig.CloudConfig{ - ResourceID: "env-id", + // ID is only populated from env if not populated from other sources. + ResourceID: "file-id", NodeName: "thehostname", } @@ -2791,44 +2783,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) - run(t, testCase{ - desc: "verify_server_hostname in the defaults stanza and internal_rpc", - args: []string{ - `-data-dir=` + dataDir, - }, - hcl: []string{` - tls { - defaults { - verify_server_hostname = false - }, - internal_rpc { - verify_server_hostname = true - } - } - `}, - json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": false - }, - "internal_rpc": { - "verify_server_hostname": true - } - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "verify_server_hostname in the grpc stanza", @@ -2851,7 +2806,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "verify_server_hostname in the https stanza", @@ -2874,7 +2829,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { } } `}, - expectedErr: "verify_server_hostname is only valid in the tls.defaults and tls.internal_rpc stanza", + expectedErr: "verify_server_hostname is only valid in the tls.internal_rpc stanza", }) run(t, testCase{ desc: "translated keys", @@ -5815,74 +5770,6 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { rt.TLS.InternalRPC.VerifyOutgoing = true }, }) - run(t, testCase{ - desc: "tls.defaults.verify_server_hostname implies tls.internal_rpc.verify_outgoing", - args: []string{ - `-data-dir=` + dataDir, - }, - 
json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": true - } - } - } - `}, - hcl: []string{` - tls { - defaults { - verify_server_hostname = true - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - - rt.TLS.Domain = "consul." - rt.TLS.NodeName = "thehostname" - - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) - run(t, testCase{ - desc: "tls.internal_rpc.verify_server_hostname overwrites tls.defaults.verify_server_hostname", - args: []string{ - `-data-dir=` + dataDir, - }, - json: []string{` - { - "tls": { - "defaults": { - "verify_server_hostname": false - }, - "internal_rpc": { - "verify_server_hostname": true - } - } - } - `}, - hcl: []string{` - tls { - defaults { - verify_server_hostname = false - }, - internal_rpc { - verify_server_hostname = true - } - } - `}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - - rt.TLS.Domain = "consul." - rt.TLS.NodeName = "thehostname" - - rt.TLS.InternalRPC.VerifyServerHostname = true - rt.TLS.InternalRPC.VerifyOutgoing = true - }, - }) run(t, testCase{ desc: "tls.grpc.use_auto_cert defaults to false", args: []string{ @@ -6011,26 +5898,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { hcl: []string{``}, expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir - rt.RaftLogStoreConfig.Backend = consul.LogStoreBackendDefault - rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024 - }, - }) - run(t, testCase{ - desc: "logstore defaults", - args: []string{ - `-data-dir=` + dataDir, - }, - json: []string{` - { - "experiments": ["resource-apis"] - } - `}, - hcl: []string{`experiments=["resource-apis"]`}, - expected: func(rt *RuntimeConfig) { - rt.DataDir = dataDir - rt.RaftLogStoreConfig.Backend = consul.LogStoreBackendDefault + rt.RaftLogStoreConfig.Backend = consul.LogStoreBackendBoltDB rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024 - rt.Experiments = []string{"resource-apis"} }, }) run(t, testCase{ @@ -6580,10 +6449,6 @@ func TestLoad_FullConfig(t *testing.T) { KVMaxValueSize: 1234567800, LeaveDrainTime: 8265 * time.Second, LeaveOnTerm: true, - Locality: &Locality{ - Region: strPtr("us-east-2"), - Zone: strPtr("us-east-2b"), - }, Logging: logging.Config{ LogLevel: "k1zo9Spt", LogJSON: true, @@ -6686,10 +6551,6 @@ func TestLoad_FullConfig(t *testing.T) { }, }, }, - Locality: &structs.Locality{ - Region: "us-east-1", - Zone: "us-east-1a", - }, }, { ID: "MRHVMZuD", @@ -6848,10 +6709,6 @@ func TestLoad_FullConfig(t *testing.T) { Connect: &structs.ServiceConnect{ Native: true, }, - Locality: &structs.Locality{ - Region: "us-west-1", - Zone: "us-west-1a", - }, Checks: structs.CheckTypes{ &structs.CheckType{ CheckID: "Zv99e9Ka", diff --git a/agent/config/segment_ce.go b/agent/config/segment_ce.go index 78ba9b87c7fdc..5f8e8cff7d8fc 100644 --- a/agent/config/segment_ce.go +++ b/agent/config/segment_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/segment_ce_test.go b/agent/config/segment_ce_test.go index f58781338184a..1fbaf3c2ae0bb 100644 --- a/agent/config/segment_ce_test.go +++ b/agent/config/segment_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package config diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index da0eec7e247af..ace159197051c 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -17,7 +17,6 @@ "ACLAgentRecoveryToken": "hidden", "ACLAgentToken": "hidden", "ACLConfigFileRegistrationToken": "hidden", - "ACLDNSToken": "hidden", "ACLDefaultToken": "hidden", "ACLReplicationToken": "hidden", "DataDir": "", @@ -134,11 +133,11 @@ "ClientSecret": "hidden", "Hostname": "", "ManagementToken": "hidden", - "NodeID": "", - "NodeName": "", "ResourceID": "cluster1", "ScadaAddress": "", - "TLSConfig": null + "TLSConfig": null, + "NodeID": "", + "NodeName": "" }, "ConfigEntryBootstrap": [], "ConnectCAConfig": {}, @@ -185,7 +184,6 @@ "DNSUseCache": false, "DataDir": "", "Datacenter": "", - "DefaultIntentionPolicy": "", "DefaultQueryTime": "0s", "DevMode": false, "DisableAnonymousSignature": false, diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index 4c734265fd41c..14df10487611e 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 acl_agent_master_token = "furuQD0b" acl_agent_token = "cOshLOQ2" @@ -317,10 +317,6 @@ limits { write_rate = 101.0 } } -locality = { - region = "us-east-2" - zone = "us-east-2b" -} log_level = "k1zo9Spt" log_json = true max_query_time = "18237s" @@ -514,10 +510,6 @@ service = { connect { native = true } - locality = { - region = "us-west-1" - zone = "us-west-1a" - } } services = [ { @@ -558,10 +550,6 @@ services = [ connect { sidecar_service {} } - locality = { - region = "us-east-1" - zone = "us-east-1a" - } }, { id = "MRHVMZuD" diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 30ede7dd18f11..f3514e632302a 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -366,10 +366,6 @@ "write_rate": 101.0 } }, - "locality": { - "region": "us-east-2", - "zone": "us-east-2b" - }, "log_level": "k1zo9Spt", "log_json": true, "max_query_time": "18237s", @@ -602,10 +598,6 @@ ], "connect": { "native": true - }, - "locality": { - "region": "us-west-1", - "zone": "us-west-1a" } }, "services": [ @@ -657,10 +649,6 @@ }, "connect": { "sidecar_service": {} - }, - "locality": { - "region": "us-east-1", - "zone": "us-east-1a" } }, { diff --git a/agent/config_endpoint.go b/agent/config_endpoint.go index 73d755786b85c..396215d78d990 100644 --- a/agent/config_endpoint.go +++ b/agent/config_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -10,12 +10,7 @@ import ( "strings" "github.com/hashicorp/consul/acl" - external "github.com/hashicorp/consul/agent/grpc-external" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" ) const ConfigEntryNotFoundErr string = "Config entry not found" @@ -181,45 +176,3 @@ func (s *HTTPHandlers) parseEntMetaForConfigEntryKind(kind string, req *http.Req } return s.parseEntMetaNoWildcard(req, entMeta) } - -// ExportedServices returns all the exported services by resolving wildcards and sameness groups -// in the exported services configuration entry -func (s *HTTPHandlers) ExportedServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - var entMeta acl.EnterpriseMeta - if err := s.parseEntMetaPartition(req, &entMeta); err != nil { - return nil, err - } - args := pbconfigentry.GetResolvedExportedServicesRequest{ - Partition: entMeta.PartitionOrEmpty(), - } - - var dc string - options := structs.QueryOptions{} - s.parse(resp, req, &dc, &options) - ctx, err := external.ContextWithQueryOptions(req.Context(), options) - if err != nil { - return nil, err - } - - var header metadata.MD - result, err := s.agent.grpcClientConfigEntry.GetResolvedExportedServices(ctx, &args, grpc.Header(&header)) - if err != nil { - return nil, err - } - - meta, err := external.QueryMetaFromGRPCMeta(header) - if err != nil { - return result.Services, fmt.Errorf("could not convert gRPC metadata to query meta: %w", err) - } - if err := setMeta(resp, &meta); err != nil { - return nil, err - } - - svcs := make([]api.ResolvedExportedService, len(result.Services)) - - for idx, svc := range result.Services { - svcs[idx] = *svc.ToAPI() - } - - return svcs, nil -} diff --git a/agent/config_endpoint_test.go b/agent/config_endpoint_test.go index 8697b55e5bf0e..88d0116f05c4d 100644 --- a/agent/config_endpoint_test.go +++ b/agent/config_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -16,27 +16,9 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testrpc" ) -func TestConfigEndpointsFailInV2(t *testing.T) { - t.Parallel() - - a := NewTestAgent(t, `experiments = ["resource-apis"]`) - - checkRequest := func(method, url string) { - t.Run(method+" "+url, func(t *testing.T) { - assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{"kind":"service-defaults", "name":"web"}`) - }) - } - - checkRequest("GET", "/v1/config/service-defaults") - checkRequest("GET", "/v1/config/service-defaults/web") - checkRequest("DELETE", "/v1/config/service-defaults/web") - checkRequest("PUT", "/v1/config") -} - func TestConfig_Get(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -144,6 +126,7 @@ func TestConfig_Get(t *testing.T) { ce.ModifyIndex = 13 ce.Hash = 0 ce.EnterpriseMeta = acl.EnterpriseMeta{} + ce.Hash = 0 out, err := a.srv.marshalJSON(req, obj) require.NoError(t, err) @@ -470,6 +453,7 @@ func TestConfig_Apply_IngressGateway(t *testing.T) { }, }, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Hash: got.GetHash(), } require.Equal(t, expect, got) } @@ -789,84 +773,3 @@ func TestConfig_Apply_ProxyDefaultsExpose(t *testing.T) { require.Equal(t, expose, entry.Expose) } } - -func TestConfig_Exported_Services(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - a := NewTestAgent(t, "") - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - defer a.Shutdown() - - { - // Register exported services - args := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "api", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - { - Peer: "west", - }, - }, - }, - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - }, - }, - }, - } - req := structs.ConfigEntryRequest{ - Datacenter: "dc1", - Entry: args, - } - var configOutput bool - require.NoError(t, a.RPC(context.Background(), "ConfigEntry.Apply", &req, &configOutput)) - require.True(t, configOutput) - } - - t.Run("exported services", func(t *testing.T) { - req, _ := http.NewRequest("GET", "/v1/exported-services", nil) - resp := httptest.NewRecorder() - raw, err := a.srv.ExportedServices(resp, req) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.Code) - - services, ok := raw.([]api.ResolvedExportedService) - require.True(t, ok) - require.Len(t, services, 2) - assertIndex(t, resp) - - entMeta := acl.DefaultEnterpriseMeta() - - expected := []api.ResolvedExportedService{ - { - Service: "api", - Partition: entMeta.PartitionOrEmpty(), - Namespace: entMeta.NamespaceOrEmpty(), - Consumers: api.ResolvedConsumers{ - Peers: []string{"east", "west"}, - }, - }, - { - Service: "db", - Partition: entMeta.PartitionOrEmpty(), - Namespace: entMeta.NamespaceOrEmpty(), - Consumers: api.ResolvedConsumers{ - Peers: []string{"east"}, - }, - }, - } - require.Equal(t, expected, services) - }) -} diff --git a/agent/configentry/config_entry.go b/agent/configentry/config_entry.go index b10989aa95d79..a4ebb254e0407 100644 --- a/agent/configentry/config_entry.go +++ b/agent/configentry/config_entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/configentry/discoverychain.go b/agent/configentry/discoverychain.go index 58bdb81fc20ca..d66b6590e0a42 100644 --- a/agent/configentry/discoverychain.go +++ b/agent/configentry/discoverychain.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/configentry/doc.go b/agent/configentry/doc.go index 7dff4a06621b5..18fd1405ab18c 100644 --- a/agent/configentry/doc.go +++ b/agent/configentry/doc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package configentry contains structs and logic related to the Configuration // Entry subsystem. Currently this is restricted to structs used during diff --git a/agent/configentry/merge_service_config.go b/agent/configentry/merge_service_config.go index d36c152105af3..cc692e789b372 100644 --- a/agent/configentry/merge_service_config.go +++ b/agent/configentry/merge_service_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry @@ -7,7 +7,7 @@ import ( "fmt" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-memdb" + memdb "github.com/hashicorp/go-memdb" "github.com/imdario/mergo" "github.com/mitchellh/copystructure" @@ -26,22 +26,32 @@ type StateStore interface { func MergeNodeServiceWithCentralConfig( ws memdb.WatchSet, state StateStore, - unmergedNS *structs.NodeService, + ns *structs.NodeService, logger hclog.Logger) (uint64, *structs.NodeService, error) { - ns := unmergedNS.WithNormalizedUpstreams() serviceName := ns.Service + var upstreams []structs.PeeredServiceName if ns.IsSidecarProxy() { // This is a sidecar proxy, ignore the proxy service's config since we are // managed by the target service config. serviceName = ns.Proxy.DestinationServiceName - } - var upstreams []structs.PeeredServiceName - for _, us := range ns.Proxy.Upstreams { - if us.DestinationType == "" || us.DestinationType == structs.UpstreamDestTypeService { - upstreams = append(upstreams, us.DestinationID()) + + // Also if we have any upstreams defined, add them to the defaults lookup request + // so we can learn about their configs. + for _, us := range ns.Proxy.Upstreams { + if us.DestinationType == "" || us.DestinationType == structs.UpstreamDestTypeService { + psn := us.DestinationID() + if psn.Peer == "" { + psn.ServiceName.EnterpriseMeta.Merge(&ns.EnterpriseMeta) + } else { + // Peer services should not have their namespace overwritten. 
+ psn.ServiceName.EnterpriseMeta.OverridePartition(ns.EnterpriseMeta.PartitionOrDefault()) + } + upstreams = append(upstreams, psn) + } } } + configReq := &structs.ServiceConfigRequest{ Name: serviceName, MeshGateway: ns.Proxy.MeshGateway, @@ -131,10 +141,6 @@ func MergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct ns.Proxy.EnvoyExtensions = nsExtensions } - if ratelimit := defaults.RateLimits.ToEnvoyExtension(); ratelimit != nil { - ns.Proxy.EnvoyExtensions = append(ns.Proxy.EnvoyExtensions, *ratelimit) - } - if ns.Proxy.MeshGateway.Mode == structs.MeshGatewayModeDefault { ns.Proxy.MeshGateway.Mode = defaults.MeshGateway.Mode } diff --git a/agent/configentry/merge_service_config_test.go b/agent/configentry/merge_service_config_test.go index e9f051d0026af..4f6dbb55488a2 100644 --- a/agent/configentry/merge_service_config_test.go +++ b/agent/configentry/merge_service_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry @@ -972,111 +972,3 @@ func Test_MergeServiceConfig_UpstreamOverrides(t *testing.T) { }) } } - -// Tests that RateLimit config is a no-op in non-enterprise. -// In practice, the ratelimit config would have been validated -// on write. -func Test_MergeServiceConfig_RateLimit(t *testing.T) { - rl := structs.RateLimits{ - InstanceLevel: structs.InstanceLevelRateLimits{ - RequestsPerSecond: 1234, - RequestsMaxBurst: 2345, - Routes: []structs.InstanceLevelRouteRateLimits{ - { - PathExact: "/admin", - RequestsPerSecond: 3333, - RequestsMaxBurst: 4444, - }, - }, - }, - } - tests := []struct { - name string - defaults *structs.ServiceConfigResponse - service *structs.NodeService - want *structs.NodeService - }{ - { - name: "injects ratelimit extension", - defaults: &structs.ServiceConfigResponse{ - RateLimits: rl, - }, - service: &structs.NodeService{ - ID: "foo-proxy", - Service: "foo-proxy", - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo", - }, - }, - want: &structs.NodeService{ - ID: "foo-proxy", - Service: "foo-proxy", - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo", - EnvoyExtensions: func() []structs.EnvoyExtension { - if ext := rl.ToEnvoyExtension(); ext != nil { - return []structs.EnvoyExtension{*ext} - } - return nil - }(), - }, - }, - }, - { - name: "injects ratelimit extension at the end", - defaults: &structs.ServiceConfigResponse{ - RateLimits: rl, - EnvoyExtensions: []structs.EnvoyExtension{ - { - Name: "existing-ext", - Required: true, - Arguments: map[string]interface{}{ - "arg1": "val1", - }, - }, - }, - }, - service: &structs.NodeService{ - ID: "foo-proxy", - Service: "foo-proxy", - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo", - }, - }, - - want: &structs.NodeService{ - ID: "foo-proxy", - Service: "foo-proxy", - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo", - EnvoyExtensions: func() []structs.EnvoyExtension { - existing := []structs.EnvoyExtension{ - { - Name: "existing-ext", - Required: true, - Arguments: map[string]interface{}{ - "arg1": "val1", - }, - }, - } - if ext := rl.ToEnvoyExtension(); ext != nil { - existing = append(existing, *ext) - } - return existing - }(), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := MergeServiceConfig(tt.defaults, tt.service) - 
require.NoError(t, err) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/agent/configentry/resolve.go b/agent/configentry/resolve.go index 82efc3b8bd759..882f1d16b5489 100644 --- a/agent/configentry/resolve.go +++ b/agent/configentry/resolve.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry @@ -117,9 +117,6 @@ func ComputeResolvedServiceConfig( if serviceConf.Destination != nil { thisReply.Destination = *serviceConf.Destination } - if serviceConf.RateLimits != nil { - thisReply.RateLimits = *serviceConf.RateLimits - } // Populate values for the proxy config map proxyConf := thisReply.ProxyConfig diff --git a/agent/configentry/resolve_test.go b/agent/configentry/resolve_test.go index f93649df8ae7b..f0457730eaa06 100644 --- a/agent/configentry/resolve_test.go +++ b/agent/configentry/resolve_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/configentry/service_config.go b/agent/configentry/service_config.go index 83e24e27c390b..4b7e5e2a27c1d 100644 --- a/agent/configentry/service_config.go +++ b/agent/configentry/service_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/connect/authz.go b/agent/connect/authz.go index cc14dd0cb61d1..74b306354faf1 100644 --- a/agent/connect/authz.go +++ b/agent/connect/authz.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/authz_test.go b/agent/connect/authz_test.go index 1cbf17517d819..6428acfc4722e 100644 --- a/agent/connect/authz_test.go +++ b/agent/connect/authz_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/ca/common.go b/agent/connect/ca/common.go index f52b030ed977e..b83a196a8a235 100644 --- a/agent/connect/ca/common.go +++ b/agent/connect/ca/common.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider.go b/agent/connect/ca/provider.go index 898da46af7290..2ef34228bc483 100644 --- a/agent/connect/ca/provider.go +++ b/agent/connect/ca/provider.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca @@ -152,7 +152,7 @@ type PrimaryProvider interface { SignIntermediate(*x509.CertificateRequest) (string, error) // CrossSignCA must accept a CA certificate from another CA provider and cross - // sign it exactly as it is such that it forms a chain back the + // sign it exactly as it is such that it forms a chain back the the // CAProvider's current root. Specifically, the Distinguished Name, Subject // Alternative Name, SubjectKeyID and other relevant extensions must be kept. // The resulting certificate must have a distinct Serial Number and the diff --git a/agent/connect/ca/provider_aws.go b/agent/connect/ca/provider_aws.go index 1ce5a5eba57d8..d45f3295a8e74 100644 --- a/agent/connect/ca/provider_aws.go +++ b/agent/connect/ca/provider_aws.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_aws_test.go b/agent/connect/ca/provider_aws_test.go index d46221af1fce9..cba2897fa26a4 100644 --- a/agent/connect/ca/provider_aws_test.go +++ b/agent/connect/ca/provider_aws_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index a4aba91942bf4..01c4987e07d81 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_consul_config.go b/agent/connect/ca/provider_consul_config.go index c7e8b0346cdb9..b0998a0aa11b6 100644 --- a/agent/connect/ca/provider_consul_config.go +++ b/agent/connect/ca/provider_consul_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index 658a97d39bd97..0c6959c7f5d41 100644 --- a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_test.go b/agent/connect/ca/provider_test.go index 85deedbf4cb57..b7ed9e29b412d 100644 --- a/agent/connect/ca/provider_test.go +++ b/agent/connect/ca/provider_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca @@ -113,7 +113,7 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) { TLSSkipVerify: true, }, parseFunc: func(t *testing.T, raw map[string]interface{}) interface{} { - config, err := ParseVaultCAConfig(raw, true) + config, err := ParseVaultCAConfig(raw) require.NoError(t, err) return config }, diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 692b9a568c236..542b2c55d0839 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca @@ -99,7 +99,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig { // Configure sets up the provider using the given configuration. // Configure supports being called multiple times to re-configure the provider. 
func (v *VaultProvider) Configure(cfg ProviderConfig) error { - config, err := ParseVaultCAConfig(cfg.RawConfig, v.isPrimary) + config, err := ParseVaultCAConfig(cfg.RawConfig) if err != nil { return err } @@ -196,11 +196,11 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { } func (v *VaultProvider) ValidateConfigUpdate(prevRaw, nextRaw map[string]interface{}) error { - prev, err := ParseVaultCAConfig(prevRaw, v.isPrimary) + prev, err := ParseVaultCAConfig(prevRaw) if err != nil { return fmt.Errorf("failed to parse existing CA config: %w", err) } - next, err := ParseVaultCAConfig(nextRaw, v.isPrimary) + next, err := ParseVaultCAConfig(nextRaw) if err != nil { return fmt.Errorf("failed to parse new CA config: %w", err) } @@ -848,7 +848,7 @@ func (v *VaultProvider) Cleanup(providerTypeChange bool, otherConfig map[string] v.Stop() if !providerTypeChange { - newConfig, err := ParseVaultCAConfig(otherConfig, v.isPrimary) + newConfig, err := ParseVaultCAConfig(otherConfig) if err != nil { return err } @@ -941,7 +941,7 @@ func (v *VaultProvider) autotidyIssuers(path string) (bool, string) { return tidySet, errStr } -func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.VaultCAProviderConfig, error) { +func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderConfig, error) { config := structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), } @@ -972,10 +972,10 @@ func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.Va return nil, fmt.Errorf("only one of Vault token or Vault auth method can be provided, but not both") } - if isPrimary && config.RootPKIPath == "" { + if config.RootPKIPath == "" { return nil, fmt.Errorf("must provide a valid path to a root PKI backend") } - if config.RootPKIPath != "" && !strings.HasSuffix(config.RootPKIPath, "/") { + if !strings.HasSuffix(config.RootPKIPath, "/") { config.RootPKIPath += "/" } diff --git a/agent/connect/ca/provider_vault_auth.go b/agent/connect/ca/provider_vault_auth.go index 70176cc3c328a..ddfbde34070c7 100644 --- a/agent/connect/ca/provider_vault_auth.go +++ b/agent/connect/ca/provider_vault_auth.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_alicloud.go b/agent/connect/ca/provider_vault_auth_alicloud.go index d6ae5b185ed38..1c30583179254 100644 --- a/agent/connect/ca/provider_vault_auth_alicloud.go +++ b/agent/connect/ca/provider_vault_auth_alicloud.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_approle.go b/agent/connect/ca/provider_vault_auth_approle.go index c3d7d8f9c8ac8..150c463aea9aa 100644 --- a/agent/connect/ca/provider_vault_auth_approle.go +++ b/agent/connect/ca/provider_vault_auth_approle.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_aws.go b/agent/connect/ca/provider_vault_auth_aws.go index 61762b36fd618..02abf39824cb5 100644 --- a/agent/connect/ca/provider_vault_auth_aws.go +++ b/agent/connect/ca/provider_vault_auth_aws.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_azure.go b/agent/connect/ca/provider_vault_auth_azure.go index ac8d326b32790..8025977007f4d 100644 --- a/agent/connect/ca/provider_vault_auth_azure.go +++ b/agent/connect/ca/provider_vault_auth_azure.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_gcp.go b/agent/connect/ca/provider_vault_auth_gcp.go index 10dfbf4b294a7..5eefc7143663f 100644 --- a/agent/connect/ca/provider_vault_auth_gcp.go +++ b/agent/connect/ca/provider_vault_auth_gcp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_jwt.go b/agent/connect/ca/provider_vault_auth_jwt.go index e80751cd59c3d..2560f856d82af 100644 --- a/agent/connect/ca/provider_vault_auth_jwt.go +++ b/agent/connect/ca/provider_vault_auth_jwt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_k8s.go b/agent/connect/ca/provider_vault_auth_k8s.go index acd6f68bc5ddc..c3a69c6ccd44b 100644 --- a/agent/connect/ca/provider_vault_auth_k8s.go +++ b/agent/connect/ca/provider_vault_auth_k8s.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_auth_test.go b/agent/connect/ca/provider_vault_auth_test.go index 361d89400e22c..74507acb39e7b 100644 --- a/agent/connect/ca/provider_vault_auth_test.go +++ b/agent/connect/ca/provider_vault_auth_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index 5b74946cada9c..9316f65602fe1 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca @@ -62,7 +62,6 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { cases := map[string]struct { rawConfig map[string]interface{} expConfig *structs.VaultCAProviderConfig - isPrimary bool expError string }{ "no token and no auth method provided": { @@ -73,26 +72,15 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { rawConfig: map[string]interface{}{"Token": "test", "AuthMethod": map[string]interface{}{"Type": "test"}}, expError: "only one of Vault token or Vault auth method can be provided, but not both", }, - "primary no root PKI path": { - rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, - isPrimary: true, + "no root PKI path": { + rawConfig: map[string]interface{}{"Token": "test"}, expError: "must provide a valid path to a root PKI backend", }, - "secondary no root PKI path": { - rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, - isPrimary: false, - expConfig: &structs.VaultCAProviderConfig{ - CommonCAProviderConfig: defaultCommonConfig(), - Token: "test", - IntermediatePKIPath: "test/", - }, - }, "no root intermediate path": { rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test"}, expError: "must provide a valid path for the intermediate PKI backend", }, "adds a slash to RootPKIPath and IntermediatePKIPath": { - isPrimary: true, rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test", "IntermediatePKIPath": "test"}, expConfig: &structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), @@ -105,7 +93,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { for name, c := range cases { t.Run(name, func(t *testing.T) { - config, err := ParseVaultCAConfig(c.rawConfig, c.isPrimary) + config, err := ParseVaultCAConfig(c.rawConfig) if c.expError != "" { require.EqualError(t, err, c.expError) } else { @@ -295,7 +283,7 @@ func TestVaultCAProvider_ConfigureFailureGoroutineLeakCheck(t *testing.T) { profile := pprof.Lookup("goroutine") sb := strings.Builder{} require.NoError(r, profile.WriteTo(&sb, 2)) - r.Log(sb.String()) + t.Log(sb.String()) require.Contains(r, sb.String(), "created by github.com/hashicorp/consul/agent/connect/ca.(*VaultProvider).Configure", "expected renewal goroutine, got none") @@ -628,9 +616,6 @@ func TestVaultCAProvider_SignLeaf(t *testing.T) { } func TestVaultCAProvider_CrossSignCA(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } SkipIfVaultNotPresent(t) t.Parallel() @@ -640,6 +625,12 @@ func TestVaultCAProvider_CrossSignCA(t *testing.T) { run := func(t *testing.T, tc CASigningKeyTypes, withSudo, expectFailure bool) { t.Parallel() + if tc.SigningKeyType != tc.CSRKeyType { + // TODO: uncomment since the bug is closed + // See https://github.com/hashicorp/vault/issues/7709 + t.Skip("Vault doesn't support cross-signing different key types yet.") + } + testVault1 := NewTestVaultServer(t) attr1 := &VaultTokenAttributes{ @@ -1273,6 +1264,11 @@ func TestVaultCAProvider_DeletePreviousIssuerAndKey(t *testing.T) { }) res, err := testVault.Client().Logical().List("pki-intermediate/issuers") require.NoError(t, err) + + if res == nil { + t.Skip("Vault version < 1.11 does not have multi issuers functionality") + } + // Why 2 issuers? There is always an initial issuer that // gets created before we manage the lifecycle of issuers. 
// Since we're asserting that the number doesn't grow diff --git a/agent/connect/ca/testing.go b/agent/connect/ca/testing.go index 8fcf8a3c65331..f588106287660 100644 --- a/agent/connect/ca/testing.go +++ b/agent/connect/ca/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ca @@ -126,7 +126,7 @@ func SkipIfVaultNotPresent(t testing.T, reqs ...vaultRequirements) { } } -func NewTestVaultServer(t retry.TestingTB) *TestVaultServer { +func NewTestVaultServer(t testing.T) *TestVaultServer { vaultBinaryName := os.Getenv("VAULT_BINARY_NAME") if vaultBinaryName == "" { vaultBinaryName = "vault" @@ -204,7 +204,7 @@ func (v *TestVaultServer) Client() *vaultapi.Client { return v.client } -func (v *TestVaultServer) WaitUntilReady(t retry.TestingTB) { +func (v *TestVaultServer) WaitUntilReady(t testing.T) { var version string retry.Run(t, func(r *retry.R) { resp, err := v.client.Sys().Health() diff --git a/agent/connect/common_names.go b/agent/connect/common_names.go index c52df9f10fb49..3c4c30633d533 100644 --- a/agent/connect/common_names.go +++ b/agent/connect/common_names.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/csr.go b/agent/connect/csr.go index 0a491b0b65522..9cf0d884dea7b 100644 --- a/agent/connect/csr.go +++ b/agent/connect/csr.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/csr_test.go b/agent/connect/csr_test.go index 1833b78a77985..6aef985f006fe 100644 --- a/agent/connect/csr_test.go +++ b/agent/connect/csr_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/generate.go b/agent/connect/generate.go index 4679e2057b92f..a811b4c4a5e1c 100644 --- a/agent/connect/generate.go +++ b/agent/connect/generate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -42,7 +42,7 @@ func generateRSAKey(keyBits int) (crypto.Signer, string, error) { // Check for a secure key length. if keyBits < MinPrivateKeyBitsRSA { - return nil, "", fmt.Errorf("error generating RSA private key: invalid key size %d, must be at least %d bits", keyBits, MinPrivateKeyBitsRSA) + return nil, "", fmt.Errorf("error generating RSA private key: key size must be at least %d bits", MinPrivateKeyBitsRSA) } pk, err := rsa.GenerateKey(rand.Reader, keyBits) diff --git a/agent/connect/generate_test.go b/agent/connect/generate_test.go index ca956b702f165..67be6081fe08c 100644 --- a/agent/connect/generate_test.go +++ b/agent/connect/generate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/parsing.go b/agent/connect/parsing.go index f1e89fe0255bc..a89544532fbb1 100644 --- a/agent/connect/parsing.go +++ b/agent/connect/parsing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -148,7 +148,7 @@ func ParseSigner(pemValue string) (crypto.Signer, error) { } // ParseCSR parses a CSR from a PEM-encoded value. The certificate request -// must be the first block in the PEM value. 
+// must be the the first block in the PEM value. func ParseCSR(pemValue string) (*x509.CertificateRequest, error) { // The _ result below is not an error but the remaining PEM bytes. block, _ := pem.Decode([]byte(pemValue)) diff --git a/agent/connect/sni.go b/agent/connect/sni.go index f7d14800a922f..339116b64c038 100644 --- a/agent/connect/sni.go +++ b/agent/connect/sni.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/sni_test.go b/agent/connect/sni_test.go index ed0bd07280417..acbfd49ce028e 100644 --- a/agent/connect/sni_test.go +++ b/agent/connect/sni_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index a852d9130c87b..7b30d85176478 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/testing_ca_test.go b/agent/connect/testing_ca_test.go index 9b62a2baee54b..492ca9e32d93b 100644 --- a/agent/connect/testing_ca_test.go +++ b/agent/connect/testing_ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/testing_spiffe.go b/agent/connect/testing_spiffe.go index e50b2b3b90cd8..f48222c443f9c 100644 --- a/agent/connect/testing_spiffe.go +++ b/agent/connect/testing_spiffe.go @@ -1,24 +1,26 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect -import "github.com/hashicorp/consul/sdk/testutil" +import ( + "github.com/mitchellh/go-testing-interface" +) // TestSpiffeIDService returns a SPIFFE ID representing a service. -func TestSpiffeIDService(t testutil.TestingTB, service string) *SpiffeIDService { +func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService { return TestSpiffeIDServiceWithHost(t, service, TestClusterID+".consul") } // TestSpiffeIDServiceWithHost returns a SPIFFE ID representing a service with // the specified trust domain. -func TestSpiffeIDServiceWithHost(t testutil.TestingTB, service, host string) *SpiffeIDService { +func TestSpiffeIDServiceWithHost(t testing.T, service, host string) *SpiffeIDService { return TestSpiffeIDServiceWithHostDC(t, service, host, "dc1") } // TestSpiffeIDServiceWithHostDC returns a SPIFFE ID representing a service with // the specified trust domain for the given datacenter. -func TestSpiffeIDServiceWithHostDC(t testutil.TestingTB, service, host, datacenter string) *SpiffeIDService { +func TestSpiffeIDServiceWithHostDC(t testing.T, service, host, datacenter string) *SpiffeIDService { return &SpiffeIDService{ Host: host, Namespace: "default", diff --git a/agent/connect/uri.go b/agent/connect/uri.go index bc898f78654f4..ce44967432f61 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -23,8 +23,6 @@ type CertURI interface { } var ( - spiffeIDWorkloadIdentityRegexp = regexp.MustCompile( - `^(?:/ap/([^/]+))/ns/([^/]+)/identity/([^/]+)$`) spiffeIDServiceRegexp = regexp.MustCompile( `^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`) spiffeIDAgentRegexp = regexp.MustCompile( @@ -96,32 +94,6 @@ func ParseCertURI(input *url.URL) (CertURI, error) { Datacenter: dc, Service: service, }, nil - } else if v := spiffeIDWorkloadIdentityRegexp.FindStringSubmatch(path); v != nil { - // Determine the values. We assume they're reasonable to save cycles, - // but if the raw path is not empty that means that something is - // URL encoded so we go to the slow path. - ap := v[1] - ns := v[2] - workloadIdentity := v[3] - if input.RawPath != "" { - var err error - if ap, err = url.PathUnescape(v[1]); err != nil { - return nil, fmt.Errorf("Invalid admin partition: %s", err) - } - if ns, err = url.PathUnescape(v[2]); err != nil { - return nil, fmt.Errorf("Invalid namespace: %s", err) - } - if workloadIdentity, err = url.PathUnescape(v[3]); err != nil { - return nil, fmt.Errorf("Invalid workload identity: %s", err) - } - } - - return &SpiffeIDWorkloadIdentity{ - TrustDomain: input.Host, - Partition: ap, - Namespace: ns, - WorkloadIdentity: workloadIdentity, - }, nil } else if v := spiffeIDAgentRegexp.FindStringSubmatch(path); v != nil { // Determine the values. We assume they're reasonable to save cycles, // but if the raw path is not empty that means that something is diff --git a/agent/connect/uri_agent.go b/agent/connect/uri_agent.go index 1babf99873809..c3d3a86bf115c 100644 --- a/agent/connect/uri_agent.go +++ b/agent/connect/uri_agent.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/uri_agent_ce.go b/agent/connect/uri_agent_ce.go index 70e775f6e4975..2a87d108432fb 100644 --- a/agent/connect/uri_agent_ce.go +++ b/agent/connect/uri_agent_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_agent_ce_test.go b/agent/connect/uri_agent_ce_test.go index 026d4bc173031..57f1286fd1e1a 100644 --- a/agent/connect/uri_agent_ce_test.go +++ b/agent/connect/uri_agent_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_mesh_gateway.go b/agent/connect/uri_mesh_gateway.go index d5cf155bf8d7c..ec474efa40851 100644 --- a/agent/connect/uri_mesh_gateway.go +++ b/agent/connect/uri_mesh_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/uri_mesh_gateway_ce.go b/agent/connect/uri_mesh_gateway_ce.go index 48c43326f1450..876e05101b6b6 100644 --- a/agent/connect/uri_mesh_gateway_ce.go +++ b/agent/connect/uri_mesh_gateway_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_mesh_gateway_ce_test.go b/agent/connect/uri_mesh_gateway_ce_test.go index bb4579cdc641c..593de8ef31053 100644 --- a/agent/connect/uri_mesh_gateway_ce_test.go +++ b/agent/connect/uri_mesh_gateway_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_server.go b/agent/connect/uri_server.go index 5a2b9c2429283..894ad63784bfe 100644 --- a/agent/connect/uri_server.go +++ b/agent/connect/uri_server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index b35d1e0df437d..31bd3e5df62ba 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -8,7 +8,6 @@ import ( "net/url" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/proto-public/pbresource" ) // SpiffeIDService is the structure to represent the SPIFFE ID for a service. @@ -53,14 +52,3 @@ func (id SpiffeIDService) uriPath() string { } return path } - -// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity. -// TODO (ishustava): make sure ref type is workload identity. -func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string { - return SpiffeIDWorkloadIdentity{ - TrustDomain: trustDomain, - Partition: ref.Tenancy.Partition, - Namespace: ref.Tenancy.Namespace, - WorkloadIdentity: ref.Name, - }.URI().String() -} diff --git a/agent/connect/uri_service_ce.go b/agent/connect/uri_service_ce.go index 6aafde36a60e9..4106fc811b38c 100644 --- a/agent/connect/uri_service_ce.go +++ b/agent/connect/uri_service_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_service_ce_test.go b/agent/connect/uri_service_ce_test.go index 8368454db5237..7d73151edc032 100644 --- a/agent/connect/uri_service_ce_test.go +++ b/agent/connect/uri_service_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package connect diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index 1913ae6bdfdfe..24330a3d70b12 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -51,20 +51,14 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool { // worry about Unicode domains if we start allowing customisation beyond the // built-in cluster ids. return strings.ToLower(other.Host) == id.Host() - case *SpiffeIDWorkloadIdentity: - // The trust domain component of the workload identity SPIFFE ID must be an exact match for now under - // ascii case folding (since hostnames are case-insensitive). Later we might - // worry about Unicode domains if we start allowing customisation beyond the - // built-in cluster ids. 
- return strings.ToLower(other.TrustDomain) == id.Host() case *SpiffeIDMeshGateway: - // The host component of the mesh gateway SPIFFE ID must be an exact match for now under + // The host component of the service must be an exact match for now under // ascii case folding (since hostnames are case-insensitive). Later we might // worry about Unicode domains if we start allowing customisation beyond the // built-in cluster ids. return strings.ToLower(other.Host) == id.Host() case *SpiffeIDServer: - // The host component of the server SPIFFE ID must be an exact match for now under + // The host component of the service must be an exact match for now under // ascii case folding (since hostnames are case-insensitive). Later we might // worry about Unicode domains if we start allowing customisation beyond the // built-in cluster ids. diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index 737ca460542b7..ba426173160ea 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -98,30 +98,6 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) { input: &SpiffeIDService{Host: TestClusterID + ".fake", Namespace: "default", Datacenter: "dc1", Service: "web"}, want: false, }, - { - name: "workload - good", - id: testSigning, - input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".consul", Namespace: "default", WorkloadIdentity: "web"}, - want: true, - }, - { - name: "workload - good mixed case", - id: testSigning, - input: &SpiffeIDWorkloadIdentity{TrustDomain: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", WorkloadIdentity: "WEB"}, - want: true, - }, - { - name: "workload - different cluster", - id: testSigning, - input: &SpiffeIDWorkloadIdentity{TrustDomain: "55555555-4444-3333-2222-111111111111.consul", Namespace: "default", WorkloadIdentity: "web"}, - want: false, - }, - { - name: "workload - different TLD", - id: testSigning, - input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".fake", Namespace: "default", WorkloadIdentity: "web"}, - want: false, - }, { name: "mesh gateway - good", id: testSigning, diff --git a/agent/connect/uri_test.go b/agent/connect/uri_test.go index 52116845975bb..2ea439f53668c 100644 --- a/agent/connect/uri_test.go +++ b/agent/connect/uri_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect @@ -51,61 +51,6 @@ func TestParseCertURIFromString(t *testing.T) { }, ParseError: "", }, - { - Name: "basic workload ID", - URI: "spiffe://1234.consul/ap/default/ns/default/identity/web", - Struct: &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - Partition: defaultEntMeta.PartitionOrDefault(), - Namespace: "default", - WorkloadIdentity: "web", - }, - ParseError: "", - }, - { - Name: "basic workload ID with nondefault partition", - URI: "spiffe://1234.consul/ap/bizdev/ns/default/identity/web", - Struct: &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - Partition: "bizdev", - Namespace: "default", - WorkloadIdentity: "web", - }, - ParseError: "", - }, - { - Name: "workload ID error - missing identity", - URI: "spiffe://1234.consul/ns/default", - Struct: &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - Partition: defaultEntMeta.PartitionOrDefault(), - Namespace: "default", - WorkloadIdentity: "web", - }, - ParseError: "SPIFFE ID is not in the expected format", - }, - { - Name: "workload ID error - missing partition", - URI: "spiffe://1234.consul/ns/default/identity/web", - Struct: &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - Partition: defaultEntMeta.PartitionOrDefault(), - Namespace: "default", - WorkloadIdentity: "web", - }, - ParseError: "SPIFFE ID is not in the expected format", - }, - { - Name: "workload ID error - missing namespace", - URI: "spiffe://1234.consul/ap/default/identity/web", - Struct: &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - Partition: defaultEntMeta.PartitionOrDefault(), - Namespace: "default", - WorkloadIdentity: "web", - }, - ParseError: "SPIFFE ID is not in the expected format", - }, { Name: "basic agent ID", URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid", diff --git a/agent/connect/uri_workload_identity.go b/agent/connect/uri_workload_identity.go deleted file mode 100644 index 83e022bde69e3..0000000000000 --- a/agent/connect/uri_workload_identity.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package connect - -import ( - "fmt" - "net/url" -) - -// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload. -type SpiffeIDWorkloadIdentity struct { - TrustDomain string - Partition string - Namespace string - WorkloadIdentity string -} - -// URI returns the *url.URL for this SPIFFE ID. -func (id SpiffeIDWorkloadIdentity) URI() *url.URL { - var result url.URL - result.Scheme = "spiffe" - result.Host = id.TrustDomain - result.Path = id.uriPath() - return &result -} - -func (id SpiffeIDWorkloadIdentity) uriPath() string { - // Although CE has no support for partitions, it still needs to be able to - // handle exportedPartition from peered Consul Enterprise clusters in order - // to generate the correct SpiffeID. - // We intentionally avoid using pbpartition.DefaultName here to be CE friendly. - path := fmt.Sprintf("/ap/%s/ns/%s/identity/%s", - id.Partition, - id.Namespace, - id.WorkloadIdentity, - ) - - return path -} diff --git a/agent/connect/uri_workload_identity_ce.go b/agent/connect/uri_workload_identity_ce.go deleted file mode 100644 index 03505616341ee..0000000000000 --- a/agent/connect/uri_workload_identity_ce.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package connect - -import ( - "github.com/hashicorp/consul/acl" -) - -// TODO: this will need to somehow be updated to set namespace here when we include namespaces in CE - -// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDWorkloadIdentity. -// in CE this just returns an empty (but never nil) struct pointer -func (id SpiffeIDWorkloadIdentity) GetEnterpriseMeta() *acl.EnterpriseMeta { - return &acl.EnterpriseMeta{} -} diff --git a/agent/connect/uri_workload_identity_test.go b/agent/connect/uri_workload_identity_test.go deleted file mode 100644 index 94beb80f584bb..0000000000000 --- a/agent/connect/uri_workload_identity_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package connect - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSpiffeIDWorkloadURI(t *testing.T) { - t.Run("spiffe id workload uri default tenancy", func(t *testing.T) { - wl := &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - WorkloadIdentity: "web", - Partition: "default", - Namespace: "default", - } - require.Equal(t, "spiffe://1234.consul/ap/default/ns/default/identity/web", wl.URI().String()) - }) - t.Run("spiffe id workload uri non-default tenancy", func(t *testing.T) { - wl := &SpiffeIDWorkloadIdentity{ - TrustDomain: "1234.consul", - WorkloadIdentity: "web", - Partition: "part1", - Namespace: "dev", - } - require.Equal(t, "spiffe://1234.consul/ap/part1/ns/dev/identity/web", wl.URI().String()) - }) -} diff --git a/agent/connect/x509_patch.go b/agent/connect/x509_patch.go index 54a33ce07834f..f448154f8d9f0 100644 --- a/agent/connect/x509_patch.go +++ b/agent/connect/x509_patch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect/x509_patch_test.go b/agent/connect/x509_patch_test.go index bdcb99045b578..1447802a5b879 100644 --- a/agent/connect/x509_patch_test.go +++ b/agent/connect/x509_patch_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connect diff --git a/agent/connect_auth.go b/agent/connect_auth.go new file mode 100644 index 0000000000000..7060d10b599f5 --- /dev/null +++ b/agent/connect_auth.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" +) + +// TODO(rb/intentions): this should move back into the agent endpoint since +// there is no ext_authz implementation anymore. +// +// ConnectAuthorize implements the core authorization logic for Connect. It's in +// a separate agent method here because we need to re-use this both in our own +// HTTP API authz endpoint and in the gRPX xDS/ext_authz API for envoy. +// +// NOTE: This treats any L7 intentions as DENY. +// +// The ACL token and the auth request are provided and the auth decision (true +// means authorized) and reason string are returned. 
+// +// If the request input is invalid the error returned will be a BadRequest HTTPError, +// if the token doesn't grant necessary access then an acl.ErrPermissionDenied +// error is returned, otherwise error indicates an unexpected server failure. If +// access is denied, no error is returned but the first return value is false. +func (a *Agent) ConnectAuthorize(token string, + req *structs.ConnectAuthorizeRequest) (allowed bool, reason string, m *cache.ResultMeta, err error) { + + // Helper to make the error cases read better without resorting to named + // returns which get messy and prone to mistakes in a method this long. + returnErr := func(err error) (bool, string, *cache.ResultMeta, error) { + return false, "", nil, err + } + + if req == nil { + return returnErr(HTTPError{StatusCode: http.StatusBadRequest, Reason: "Invalid request"}) + } + + // We need to have a target to check intentions + if req.Target == "" { + return returnErr(HTTPError{StatusCode: http.StatusBadRequest, Reason: "Target service must be specified"}) + } + + // Parse the certificate URI from the client ID + uri, err := connect.ParseCertURIFromString(req.ClientCertURI) + if err != nil { + return returnErr(HTTPError{StatusCode: http.StatusBadRequest, Reason: "ClientCertURI not a valid Connect identifier"}) + } + + uriService, ok := uri.(*connect.SpiffeIDService) + if !ok { + return returnErr(HTTPError{StatusCode: http.StatusBadRequest, Reason: "ClientCertURI not a valid Service identifier"}) + } + + // We need to verify service:write permissions for the given token. + // We do this manually here since the RPC request below only verifies + // service:read. + var authzContext acl.AuthorizerContext + authz, err := a.delegate.ResolveTokenAndDefaultMeta(token, &req.EnterpriseMeta, &authzContext) + if err != nil { + return returnErr(err) + } + + if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(req.Target, &authzContext); err != nil { + return returnErr(err) + } + + if !uriService.MatchesPartition(req.TargetPartition()) { + reason = fmt.Sprintf("Mismatched partitions: %q != %q", + uriService.PartitionOrDefault(), + acl.PartitionOrDefault(req.TargetPartition())) + return false, reason, nil, nil + } + + // Note that we DON'T explicitly validate the trust-domain matches ours. See + // the PR for this change for details. + + // TODO(banks): Implement revocation list checking here. + + // Get the intentions for this target service. + args := &structs.IntentionQueryRequest{ + Datacenter: a.config.Datacenter, + Match: &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: []structs.IntentionMatchEntry{ + { + Namespace: req.TargetNamespace(), + Partition: req.TargetPartition(), + Name: req.Target, + }, + }, + }, + QueryOptions: structs.QueryOptions{Token: token}, + } + + raw, meta, err := a.cache.Get(context.TODO(), cachetype.IntentionMatchName, args) + if err != nil { + return returnErr(err) + } + + reply, ok := raw.(*structs.IndexedIntentionMatches) + if !ok { + return returnErr(fmt.Errorf("internal error: response type not correct")) + } + if len(reply.Matches) != 1 { + return returnErr(fmt.Errorf("Internal error loading matches")) + } + + // Figure out which source matches this request. + var ixnMatch *structs.Intention + for _, ixn := range reply.Matches[0] { + // We match on the intention source because the uriService is the source of the connection to authorize. 
+ if _, ok := connect.AuthorizeIntentionTarget( + uriService.Service, uriService.Namespace, uriService.Partition, "", ixn, structs.IntentionMatchSource); ok { + ixnMatch = ixn + break + } + } + + if ixnMatch != nil { + if len(ixnMatch.Permissions) == 0 { + // This is an L4 intention. + reason = fmt.Sprintf("Matched L4 intention: %s", ixnMatch.String()) + auth := ixnMatch.Action == structs.IntentionActionAllow + return auth, reason, &meta, nil + } + + // This is an L7 intention, so DENY. + reason = fmt.Sprintf("Matched L7 intention: %s", ixnMatch.String()) + return false, reason, &meta, nil + } + + reason = "Default behavior configured by ACLs" + return authz.IntentionDefaultAllow(nil) == acl.Allow, reason, &meta, nil +} diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 0a60f37662473..913836f8c8757 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index f83d7328863c9..575250de4cadc 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/consul/acl.go b/agent/consul/acl.go index cdfcad640a3cd..c0107a6aa5a2c 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -102,10 +102,6 @@ func (id *missingIdentity) NodeIdentityList() []*structs.ACLNodeIdentity { return nil } -func (id *missingIdentity) TemplatedPolicyList() []*structs.ACLTemplatedPolicy { - return nil -} - func (id *missingIdentity) IsExpired(asOf time.Time) bool { return false } @@ -221,34 +217,6 @@ type ACLResolverSettings struct { ACLDefaultPolicy string } -func (a ACLResolverSettings) CheckACLs() error { - switch a.ACLDefaultPolicy { - case "allow": - case "deny": - default: - return fmt.Errorf("Unsupported default ACL policy: %s", a.ACLDefaultPolicy) - } - switch a.ACLDownPolicy { - case "allow": - case "deny": - case "async-cache", "extend-cache": - default: - return fmt.Errorf("Unsupported down ACL policy: %s", a.ACLDownPolicy) - } - return nil -} - -func (s ACLResolverSettings) IsDefaultAllow() (bool, error) { - switch s.ACLDefaultPolicy { - case "allow": - return true, nil - case "deny": - return false, nil - default: - return false, fmt.Errorf("unexpected ACL default policy value of %q", s.ACLDefaultPolicy) - } -} - // ACLResolver is the type to handle all your token and policy resolution needs. // // Supports: @@ -628,11 +596,9 @@ func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) ( roleIDs = identity.RoleIDs() serviceIdentities = structs.ACLServiceIdentities(identity.ServiceIdentityList()) nodeIdentities = structs.ACLNodeIdentities(identity.NodeIdentityList()) - templatedPolicies = structs.ACLTemplatedPolicies(identity.TemplatedPolicyList()) ) - if len(policyIDs) == 0 && len(serviceIdentities) == 0 && - len(roleIDs) == 0 && len(nodeIdentities) == 0 && len(templatedPolicies) == 0 { + if len(policyIDs) == 0 && len(serviceIdentities) == 0 && len(roleIDs) == 0 && len(nodeIdentities) == 0 { // In this case the default policy will be all that is in effect. 
return nil, nil } @@ -650,19 +616,16 @@ func (r *ACLResolver) resolvePoliciesForIdentity(identity structs.ACLIdentity) ( } serviceIdentities = append(serviceIdentities, role.ServiceIdentities...) nodeIdentities = append(nodeIdentities, role.NodeIdentityList()...) - templatedPolicies = append(templatedPolicies, role.TemplatedPolicyList()...) } // Now deduplicate any policies or service identities that occur more than once. policyIDs = dedupeStringSlice(policyIDs) serviceIdentities = serviceIdentities.Deduplicate() nodeIdentities = nodeIdentities.Deduplicate() - templatedPolicies = templatedPolicies.Deduplicate() // Generate synthetic policies for all service identities in effect. syntheticPolicies := r.synthesizePoliciesForServiceIdentities(serviceIdentities, identity.EnterpriseMetadata()) syntheticPolicies = append(syntheticPolicies, r.synthesizePoliciesForNodeIdentities(nodeIdentities, identity.EnterpriseMetadata())...) - syntheticPolicies = append(syntheticPolicies, r.synthesizePoliciesForTemplatedPolicies(templatedPolicies, identity.EnterpriseMetadata())...) // For the new ACLs policy replication is mandatory for correct operation on servers. Therefore // we only attempt to resolve policies locally @@ -706,24 +669,6 @@ func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*stru return syntheticPolicies } -func (r *ACLResolver) synthesizePoliciesForTemplatedPolicies(templatedPolicies []*structs.ACLTemplatedPolicy, entMeta *acl.EnterpriseMeta) []*structs.ACLPolicy { - if len(templatedPolicies) == 0 { - return nil - } - - syntheticPolicies := make([]*structs.ACLPolicy, 0, len(templatedPolicies)) - for _, tp := range templatedPolicies { - policy, err := tp.SyntheticPolicy(entMeta) - if err != nil { - r.logger.Warn(fmt.Sprintf("could not generate synthetic policy for templated policy: %q", tp.TemplateName), "error", err) - continue - } - syntheticPolicies = append(syntheticPolicies, policy) - } - - return syntheticPolicies -} - func mergeStringSlice(a, b []string) []string { out := make([]string, 0, len(a)+len(b)) out = append(out, a...) diff --git a/agent/consul/acl_authmethod.go b/agent/consul/acl_authmethod.go index 217007f2b5ee4..42f5b6e2404df 100644 --- a/agent/consul/acl_authmethod.go +++ b/agent/consul/acl_authmethod.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_authmethod_ce.go b/agent/consul/acl_authmethod_ce.go index 28802e77e633b..94bf78bd25693 100644 --- a/agent/consul/acl_authmethod_ce.go +++ b/agent/consul/acl_authmethod_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_ce.go b/agent/consul/acl_ce.go index a21514f3bbb7f..aafe26a13ef91 100644 --- a/agent/consul/acl_ce.go +++ b/agent/consul/acl_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_ce_test.go b/agent/consul/acl_ce_test.go index 5078ab41af743..69660f9da8044 100644 --- a/agent/consul/acl_ce_test.go +++ b/agent/consul/acl_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_client.go b/agent/consul/acl_client.go index e6ff70720cfc8..d133807604bf3 100644 --- a/agent/consul/acl_client.go +++ b/agent/consul/acl_client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index 2b8eb88952ef0..0dac4fdd9436a 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -350,10 +350,9 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok policyIDs := make(map[string]struct{}) roleIDs := make(map[string]struct{}) identityPolicies := make(map[string]*structs.ACLPolicy) - templatedPolicies := make(map[string]*structs.ACLPolicy) tokenInfo := structs.ExpandedTokenInfo{} - // Add the token's policies, templated policies and node/service identity policies + // Add the token's policies and node/service identity policies for _, policy := range token.Policies { policyIDs[policy.ID] = struct{}{} } @@ -369,14 +368,6 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok policy := identity.SyntheticPolicy(&token.EnterpriseMeta) identityPolicies[policy.ID] = policy } - for _, templatedPolicy := range token.TemplatedPolicies { - policy, err := templatedPolicy.SyntheticPolicy(&token.EnterpriseMeta) - if err != nil { - a.logger.Warn(fmt.Sprintf("could not generate synthetic policy for templated policy: %q", templatedPolicy.TemplateName), "error", err) - continue - } - templatedPolicies[policy.ID] = policy - } // Get any namespace default roles/policies to look up nsPolicies, nsRoles, err := getTokenNamespaceDefaults(ws, state, &token.EnterpriseMeta) @@ -414,14 +405,6 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok policy := identity.SyntheticPolicy(&role.EnterpriseMeta) identityPolicies[policy.ID] = policy } - for _, templatedPolicy := range role.TemplatedPolicies { - policy, err := templatedPolicy.SyntheticPolicy(&role.EnterpriseMeta) - if err != nil { - a.logger.Warn(fmt.Sprintf("could not generate synthetic policy for templated policy: %q", templatedPolicy.TemplateName), "error", err) - continue - } - templatedPolicies[policy.ID] = policy - } tokenInfo.ExpandedRoles = append(tokenInfo.ExpandedRoles, role) } @@ -440,9 +423,6 @@ func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, tok for _, policy := range identityPolicies { policies = append(policies, policy) } - for _, policy := range templatedPolicies { - policies = append(policies, policy) - } tokenInfo.ExpandedPolicies = policies tokenInfo.AgentACLDefaultPolicy = a.srv.config.ACLResolverSettings.ACLDefaultPolicy @@ -506,7 +486,6 @@ func (a *ACL) TokenClone(args *structs.ACLTokenSetRequest, reply *structs.ACLTok Roles: token.Roles, ServiceIdentities: token.ServiceIdentities, NodeIdentities: token.NodeIdentities, - TemplatedPolicies: token.TemplatedPolicies, Local: token.Local, Description: token.Description, ExpirationTime: token.ExpirationTime, @@ -1385,27 +1364,6 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e } role.NodeIdentities = role.NodeIdentities.Deduplicate() - for _, templatedPolicy := range 
role.TemplatedPolicies { - if templatedPolicy.TemplateName == "" { - return fmt.Errorf("templated policy is missing the template name field on this role") - } - - baseTemplate, ok := structs.GetACLTemplatedPolicyBase(templatedPolicy.TemplateName) - if !ok { - return fmt.Errorf("templated policy with an invalid templated name: %s for this role", templatedPolicy.TemplateName) - } - - if templatedPolicy.TemplateID == "" { - templatedPolicy.TemplateID = baseTemplate.TemplateID - } - - err := templatedPolicy.ValidateTemplatedPolicy(baseTemplate.Schema) - if err != nil { - return fmt.Errorf("encountered role with invalid templated policy: %w", err) - } - } - role.TemplatedPolicies = role.TemplatedPolicies.Deduplicate() - // calculate the hash for this role role.SetHash(true) @@ -1719,12 +1677,18 @@ func (a *ACL) BindingRuleSet(args *structs.ACLBindingRuleSetRequest, reply *stru return fmt.Errorf("Invalid Binding Rule: no BindName is set") } - if rule.BindType != structs.BindingRuleBindTypeTemplatedPolicy && rule.BindVars != nil { - return fmt.Errorf("invalid Binding Rule: BindVars cannot be set when bind type is not templated-policy.") + switch rule.BindType { + case structs.BindingRuleBindTypeService: + case structs.BindingRuleBindTypeNode: + case structs.BindingRuleBindTypeRole: + default: + return fmt.Errorf("Invalid Binding Rule: unknown BindType %q", rule.BindType) } - if err := auth.IsValidBindingRule(rule.BindType, rule.BindName, rule.BindVars, blankID.ProjectedVarNames()); err != nil { - return fmt.Errorf("Invalid Binding Rule: invalid BindName or BindVars: %w", err) + if valid, err := auth.IsValidBindName(rule.BindType, rule.BindName, blankID.ProjectedVarNames()); err != nil { + return fmt.Errorf("Invalid Binding Rule: invalid BindName: %v", err) + } else if !valid { + return fmt.Errorf("Invalid Binding Rule: invalid BindName") } req := &structs.ACLBindingRuleBatchSetRequest{ diff --git a/agent/consul/acl_endpoint_ce.go b/agent/consul/acl_endpoint_ce.go index 13d00a1694752..9d45f0fd7d890 100644 --- a/agent/consul/acl_endpoint_ce.go +++ b/agent/consul/acl_endpoint_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 7033e90881ee9..20deb56aa4b00 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -23,7 +23,6 @@ import ( "github.com/hashicorp/consul/agent/consul/authmethod/testauth" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs/aclfilter" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -377,7 +376,7 @@ func TestACLEndpoint_TokenRead(t *testing.T) { require.ElementsMatch(t, []*structs.ACLRole{r1, r2}, resp.ExpandedRoles) }) - t.Run("expanded output with node/service identities and templated policies", func(t *testing.T) { + t.Run("expanded output with node/service identities", func(t *testing.T) { setReq := structs.ACLTokenSetRequest{ Datacenter: "dc1", ACLToken: structs.ACLToken{ @@ -402,22 +401,6 @@ func TestACLEndpoint_TokenRead(t *testing.T) { Datacenter: "dc1", }, }, - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "web", - }, - Datacenters: []string{"dc1"}, - }, - { - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "foo", - }, - Datacenters: []string{"dc1"}, - }, - }, Local: false, }, WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, @@ -431,11 +414,6 @@ func TestACLEndpoint_TokenRead(t *testing.T) { for _, serviceIdentity := range setReq.ACLToken.NodeIdentities { expectedPolicies = append(expectedPolicies, serviceIdentity.SyntheticPolicy(entMeta)) } - for _, templatedPolicy := range setReq.ACLToken.TemplatedPolicies { - pol, tmplError := templatedPolicy.SyntheticPolicy(entMeta) - require.NoError(t, tmplError) - expectedPolicies = append(expectedPolicies, pol) - } setResp := structs.ACLToken{} err := msgpackrpc.CallWithCodec(codec, "ACL.TokenSet", &setReq, &setResp) @@ -490,10 +468,6 @@ func TestACLEndpoint_TokenClone(t *testing.T) { t.NodeIdentities = []*structs.ACLNodeIdentity{ {NodeName: "foo", Datacenter: "bar"}, } - t.TemplatedPolicies = []*structs.ACLTemplatedPolicy{ - {TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "foo"}, Datacenters: []string{"bar"}}, - {TemplateName: api.ACLTemplatedPolicyNodeName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "node"}}, - } }) require.NoError(t, err) @@ -516,7 +490,6 @@ func TestACLEndpoint_TokenClone(t *testing.T) { require.Equal(t, t1.Roles, t2.Roles) require.Equal(t, t1.ServiceIdentities, t2.ServiceIdentities) require.Equal(t, t1.NodeIdentities, t2.NodeIdentities) - require.Equal(t, t1.TemplatedPolicies, t2.TemplatedPolicies) require.Equal(t, t1.Local, t2.Local) require.NotEqual(t, t1.AccessorID, t2.AccessorID) require.NotEqual(t, t1.SecretID, t2.SecretID) @@ -575,10 +548,6 @@ func TestACLEndpoint_TokenSet(t *testing.T) { Datacenter: "dc1", }, }, - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - {TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "foo"}, Datacenters: []string{"bar"}}, - {TemplateName: api.ACLTemplatedPolicyNodeName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "node"}}, - }, }, WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } @@ -601,19 +570,6 @@ func TestACLEndpoint_TokenSet(t *testing.T) { require.Equal(t, "foo", 
token.NodeIdentities[0].NodeName) require.Equal(t, "dc1", token.NodeIdentities[0].Datacenter) - require.Len(t, token.TemplatedPolicies, 2) - require.Contains(t, token.TemplatedPolicies, &structs.ACLTemplatedPolicy{ - TemplateID: structs.ACLTemplatedPolicyServiceID, - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "foo"}, - Datacenters: []string{"bar"}, - }) - require.Contains(t, token.TemplatedPolicies, &structs.ACLTemplatedPolicy{ - TemplateID: structs.ACLTemplatedPolicyNodeID, - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "node"}, - }) - accessorID = token.AccessorID }) @@ -2227,39 +2183,6 @@ func TestACLEndpoint_PolicySet_CustomID(t *testing.T) { require.Error(t, err) } -func TestACLEndpoint_TemplatedPolicySet_UnknownTemplateName(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - - _, srv, _ := testACLServerWithConfig(t, nil, false) - waitForLeaderEstablishment(t, srv) - - aclEp := ACL{srv: srv} - - t.Run("unknown template name", func(t *testing.T) { - req := structs.ACLTokenSetRequest{ - Datacenter: "dc1", - ACLToken: structs.ACLToken{ - Description: "foobar", - Policies: nil, - Local: false, - TemplatedPolicies: []*structs.ACLTemplatedPolicy{{TemplateName: "fake-builtin"}}, - }, - Create: true, - WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, - } - - resp := structs.ACLToken{} - - err := aclEp.TokenSet(&req, &resp) - require.Error(t, err) - require.ErrorContains(t, err, "no such ACL templated policy with Name \"fake-builtin\"") - }) -} - func TestACLEndpoint_PolicySet_builtins(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -3573,7 +3496,7 @@ func TestACLEndpoint_BindingRuleSet(t *testing.T) { } } - requireSetErrors := func(t *testing.T, reqRule structs.ACLBindingRule, msg ...string) { + requireSetErrors := func(t *testing.T, reqRule structs.ACLBindingRule) { req := structs.ACLBindingRuleSetRequest{ Datacenter: "dc1", BindingRule: reqRule, @@ -3583,10 +3506,6 @@ func TestACLEndpoint_BindingRuleSet(t *testing.T) { err := aclEp.BindingRuleSet(&req, &resp) require.Error(t, err) - - for _, s := range msg { - require.Contains(t, err.Error(), s) - } } requireOK := func(t *testing.T, reqRule structs.ACLBindingRule) *structs.ACLBindingRule { @@ -3663,71 +3582,6 @@ func TestACLEndpoint_BindingRuleSet(t *testing.T) { require.Equal(t, "test-node", rule.BindName) }) - t.Run("Bind Policy", func(t *testing.T) { - req := structs.ACLBindingRuleSetRequest{ - Datacenter: "dc1", - BindingRule: structs.ACLBindingRule{ - Description: "foobar policy", - AuthMethod: testAuthMethod.Name, - Selector: "serviceaccount.name==abc", - BindType: structs.BindingRuleBindTypePolicy, - BindName: "test-policy", - }, - WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, - } - var resp structs.ACLBindingRule - - err := aclEp.BindingRuleSet(&req, &resp) - require.NoError(t, err) - require.NotNil(t, resp.ID) - - // Get the rule directly to validate that it exists - ruleResp, err := retrieveTestBindingRule(codec, TestDefaultInitialManagementToken, "dc1", resp.ID) - require.NoError(t, err) - rule := ruleResp.BindingRule - - require.NotEmpty(t, rule.ID) - require.Equal(t, rule.Description, "foobar policy") - require.Equal(t, rule.AuthMethod, testAuthMethod.Name) - require.Equal(t, "serviceaccount.name==abc", rule.Selector) - require.Equal(t, 
structs.BindingRuleBindTypePolicy, rule.BindType) - require.Equal(t, "test-policy", rule.BindName) - }) - - t.Run("templated policy", func(t *testing.T) { - req := structs.ACLBindingRuleSetRequest{ - Datacenter: "dc1", - BindingRule: structs.ACLBindingRule{ - Description: "templated policy binding rule", - AuthMethod: testAuthMethod.Name, - Selector: "serviceaccount.name==abc", - BindType: structs.BindingRuleBindTypeTemplatedPolicy, - BindName: api.ACLTemplatedPolicyNodeName, - BindVars: &structs.ACLTemplatedPolicyVariables{ - Name: "test-node", - }, - }, - WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, - } - var resp structs.ACLBindingRule - - err := aclEp.BindingRuleSet(&req, &resp) - require.NoError(t, err) - require.NotNil(t, resp.ID) - - // Get the rule directly to validate that it exists - ruleResp, err := retrieveTestBindingRule(codec, TestDefaultInitialManagementToken, "dc1", resp.ID) - require.NoError(t, err) - rule := ruleResp.BindingRule - - require.NotEmpty(t, rule.ID) - require.Equal(t, rule.Description, "templated policy binding rule") - require.Equal(t, rule.AuthMethod, testAuthMethod.Name) - require.Equal(t, "serviceaccount.name==abc", rule.Selector) - require.Equal(t, structs.BindingRuleBindTypeTemplatedPolicy, rule.BindType) - require.Equal(t, api.ACLTemplatedPolicyNodeName, rule.BindName) - }) - t.Run("Update fails; cannot change method name", func(t *testing.T) { reqRule := newRule() reqRule.ID = ruleID @@ -3844,35 +3698,10 @@ func TestACLEndpoint_BindingRuleSet(t *testing.T) { requireSetErrors(t, reqRule) }) - t.Run("Create fails; when bind vars is set for non templated policy", func(t *testing.T) { - reqRule := newRule() - reqRule.BindVars = &structs.ACLTemplatedPolicyVariables{ - Name: "test", - } - requireSetErrors(t, reqRule, "invalid Binding Rule: BindVars cannot be set when bind type is not templated-policy.") - }) - - t.Run("Create fails; when missing required bindvars", func(t *testing.T) { - reqRule := newRule() - reqRule.BindName = api.ACLTemplatedPolicyServiceName - reqRule.BindType = structs.BindingRuleBindTypeTemplatedPolicy - requireSetErrors(t, reqRule, "Invalid Binding Rule: invalid BindName or BindVars") - }) - - t.Run("Create fails; when bindvars contains unknown vars", func(t *testing.T) { - reqRule := newRule() - reqRule.BindName = api.ACLTemplatedPolicyServiceName - reqRule.BindType = structs.BindingRuleBindTypeTemplatedPolicy - reqRule.BindVars = &structs.ACLTemplatedPolicyVariables{ - Name: "method-${serviceaccount.bizarroname}", - } - requireSetErrors(t, reqRule, "Invalid Binding Rule: invalid BindName or BindVars") - }) - t.Run("Create fails; invalid bind type", func(t *testing.T) { reqRule := newRule() reqRule.BindType = "invalid" - requireSetErrors(t, reqRule, "invalid Binding Rule: unknown BindType") + requireSetErrors(t, reqRule) }) t.Run("Create fails; bind name with unknown vars", func(t *testing.T) { @@ -4571,11 +4400,6 @@ func TestACLEndpoint_Login(t *testing.T) { "fake-node", "default", "mynode", "jkl101", ) - testauth.InstallSessionToken( - testSessionID, - "fake-policy", // 1 rule (policy) - "default", "mypolicy", "jkl012", - ) method, err := upsertTestAuthMethod(codec, TestDefaultInitialManagementToken, "dc1", testSessionID) require.NoError(t, err) @@ -4623,15 +4447,6 @@ func TestACLEndpoint_Login(t *testing.T) { ) require.NoError(t, err) - // policy rule - _, err = upsertTestBindingRule( - codec, TestDefaultInitialManagementToken, "dc1", method.Name, - "serviceaccount.namespace==default and 
serviceaccount.name==mypolicy", - structs.BindingRuleBindTypePolicy, - "method-${serviceaccount.name}", - ) - require.NoError(t, err) - t.Run("do not provide a token", func(t *testing.T) { req := structs.ACLLoginRequest{ Auth: &structs.ACLLoginParams{ diff --git a/agent/consul/acl_replication.go b/agent/consul/acl_replication.go index 79e4e5d7a7d89..849e81adf697a 100644 --- a/agent/consul/acl_replication.go +++ b/agent/consul/acl_replication.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_replication_test.go b/agent/consul/acl_replication_test.go index 70ed42672916b..2b50f142bf303 100644 --- a/agent/consul/acl_replication_test.go +++ b/agent/consul/acl_replication_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_replication_types.go b/agent/consul/acl_replication_types.go index 8a672cb90d566..6d6b68690682b 100644 --- a/agent/consul/acl_replication_types.go +++ b/agent/consul/acl_replication_types.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_server.go b/agent/consul/acl_server.go index 8fe1f45fd0984..f91cb77620238 100644 --- a/agent/consul/acl_server.go +++ b/agent/consul/acl_server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_server_ce.go b/agent/consul/acl_server_ce.go index 5adb8be5725d5..f2de1486a28af 100644 --- a/agent/consul/acl_server_ce.go +++ b/agent/consul/acl_server_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index a6d6a77405f5a..386c60e38e39f 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -21,7 +21,6 @@ import ( "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" ) @@ -615,6 +614,10 @@ func (d *ACLResolverTestDelegate) ACLDatacenter() string { return d.datacenter } +func (d *ACLResolverTestDelegate) UseLegacyACLs() bool { + return d.legacy +} + func (d *ACLResolverTestDelegate) ResolveIdentityFromToken(token string) (bool, structs.ACLIdentity, error) { if !d.localTokens { return false, nil, nil @@ -727,6 +730,7 @@ func TestACLResolver_Disabled(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: false, datacenter: "dc1", + legacy: false, } r := newTestACLResolver(t, delegate, nil) @@ -741,6 +745,7 @@ func TestACLResolver_ResolveRootACL(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, } r := newTestACLResolver(t, delegate, nil) @@ -791,6 +796,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -818,6 +824,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -845,6 +852,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -880,6 +888,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -910,6 +919,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -960,6 +970,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -990,6 +1001,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -1025,6 +1037,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -1056,6 +1069,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -1102,6 +1116,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: false, localRoles: false, @@ -1143,6 +1158,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, localRoles: false, @@ -1177,6 +1193,7 @@ func 
TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, localRoles: false, @@ -1212,6 +1229,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -1257,6 +1275,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error { @@ -1320,6 +1339,7 @@ func TestACLResolver_DownPolicy(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error { @@ -1377,6 +1397,7 @@ func TestACLResolver_DatacenterScoping(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: true, localRoles: true, @@ -1396,6 +1417,7 @@ func TestACLResolver_DatacenterScoping(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc2", + legacy: false, localTokens: true, localPolicies: true, localRoles: true, @@ -1431,6 +1453,7 @@ func TestACLResolver_Client(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, tokenReadFn: func(_ *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error { @@ -1521,6 +1544,7 @@ func TestACLResolver_Client(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, tokenReadFn: func(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error { @@ -1583,6 +1607,7 @@ func TestACLResolver_Client_TokensPoliciesAndRoles(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: false, localRoles: false, @@ -1599,6 +1624,7 @@ func TestACLResolver_LocalTokensPoliciesAndRoles(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: true, localPolicies: true, localRoles: true, @@ -1614,6 +1640,7 @@ func TestACLResolver_LocalPoliciesAndRoles(t *testing.T) { delegate := &ACLResolverTestDelegate{ enabled: true, datacenter: "dc1", + legacy: false, localTokens: false, localPolicies: true, localRoles: true, @@ -1951,48 +1978,6 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega }, }, }, - &structs.ACLToken{ - AccessorID: "359b9927-25fd-46b9-84c2-3470f848ec65", - SecretID: "found-synthetic-policy-5", - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "templated-test-node1", - }, - Datacenters: []string{"dc1"}, - }, - { - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "templated-test-node2", - }, - // as the resolver is in dc1 this identity should be ignored - Datacenters: []string{"dc2"}, - }, - }, - }, - &structs.ACLToken{ - AccessorID: "359b9927-25fd-46b9-84c2-3470f848ec65", - SecretID: 
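Editor's note: the resolver tests in the hunks above drive the code through an ACLResolverTestDelegate whose boolean knobs (enabled, legacy, localTokens, localPolicies, localRoles) select the path under test, and this backport adds the legacy knob plus a UseLegacyACLs accessor. A toy sketch of the same fake-delegate pattern, using hypothetical names rather than Consul's real delegate interface:

    package main

    import "fmt"

    // aclDelegate stands in for a test delegate: each boolean picks a
    // resolver code path, mirroring the fields set in the tests above.
    type aclDelegate struct {
        enabled       bool
        datacenter    string
        legacy        bool
        localTokens   bool
        localPolicies bool
        localRoles    bool
    }

    // UseLegacyACLs mirrors the accessor style added by this patch: the
    // resolver asks the delegate at runtime instead of reading a field.
    func (d *aclDelegate) UseLegacyACLs() bool { return d.legacy }

    func main() {
        d := &aclDelegate{enabled: true, datacenter: "dc1", legacy: false}
        fmt.Println(d.UseLegacyACLs()) // false: take the non-legacy resolution path
    }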
"found-synthetic-policy-6", - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "templated-test-node3", - }, - Datacenters: []string{"dc1"}, - }, - { - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "templated-test-node4", - }, - // as the resolver is in dc1 this identity should be ignored - Datacenters: []string{"dc2"}, - }, - }, - }, }) // We resolve these tokens in the same cache session @@ -2058,22 +2043,6 @@ func testACLResolver_variousTokens(t *testing.T, delegate *ACLResolverTestDelega // ensure node identity for other DC is ignored require.Equal(t, acl.Deny, authz.NodeWrite("test-node-dc2", nil)) }) - t.Run("synthetic-policy-6", func(t *testing.T) { // templated policy - authz, err := r.ResolveToken("found-synthetic-policy-6") - require.NoError(t, err) - require.NotNil(t, authz) - - // spot check some random perms - require.Equal(t, acl.Deny, authz.ACLRead(nil)) - require.Equal(t, acl.Deny, authz.NodeWrite("foo", nil)) - // ensure we didn't bleed over to the other synthetic policy - require.Equal(t, acl.Deny, authz.NodeWrite("templated-test-node1", nil)) - // check our own synthetic policy - require.Equal(t, acl.Allow, authz.ServiceRead("literally-anything", nil)) - require.Equal(t, acl.Allow, authz.NodeWrite("templated-test-node3", nil)) - // ensure template identity for other DC is ignored - require.Equal(t, acl.Deny, authz.NodeWrite("templated-test-node4", nil)) - }) }) runTwiceAndReset("Anonymous", func(t *testing.T) { diff --git a/agent/consul/acl_token_exp.go b/agent/consul/acl_token_exp.go index 06559ce71d566..7f5de395c7a9d 100644 --- a/agent/consul/acl_token_exp.go +++ b/agent/consul/acl_token_exp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/acl_token_exp_test.go b/agent/consul/acl_token_exp_test.go index 031c150dabb0c..949d54510ba54 100644 --- a/agent/consul/acl_token_exp_test.go +++ b/agent/consul/acl_token_exp_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/auth/binder.go b/agent/consul/auth/binder.go index ba66bc916bdbe..354fedc8f1854 100644 --- a/agent/consul/auth/binder.go +++ b/agent/consul/auth/binder.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package auth import ( - "errors" "fmt" "github.com/hashicorp/go-bexpr" @@ -36,17 +35,14 @@ func NewBinder(store BinderStateStore, datacenter string) *Binder { type BinderStateStore interface { ACLBindingRuleList(ws memdb.WatchSet, methodName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ACLBindingRules, error) ACLRoleGetByName(ws memdb.WatchSet, roleName string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLRole, error) - ACLPolicyGetByName(ws memdb.WatchSet, policyName string, entMeta *acl.EnterpriseMeta) (uint64, *structs.ACLPolicy, error) } -// Bindings contains the ACL roles, service identities, node identities, policies, -// templated policies, and enterprise meta to be assigned to the created token. +// Bindings contains the ACL roles, service identities, node identities and +// enterprise meta to be assigned to the created token. 
type Bindings struct { Roles []structs.ACLTokenRoleLink ServiceIdentities []*structs.ACLServiceIdentity NodeIdentities []*structs.ACLNodeIdentity - Policies []structs.ACLTokenPolicyLink - TemplatedPolicies structs.ACLTemplatedPolicies EnterpriseMeta acl.EnterpriseMeta } @@ -59,9 +55,7 @@ func (b *Bindings) None() bool { return len(b.ServiceIdentities) == 0 && len(b.NodeIdentities) == 0 && - len(b.TemplatedPolicies) == 0 && - len(b.Roles) == 0 && - len(b.Policies) == 0 + len(b.Roles) == 0 } // Bind collects the ACL roles, service identities, etc. to be assigned to the @@ -92,60 +86,30 @@ func (b *Binder) Bind(authMethod *structs.ACLAuthMethod, verifiedIdentity *authm return &bindings, nil } - // Compute role, service identity, node identity or templated policy names by interpolating + // Compute role, service identity, or node identity names by interpolating // the identity's projected variables into the rule BindName templates. for _, rule := range matchingRules { + bindName, valid, err := computeBindName(rule.BindType, rule.BindName, verifiedIdentity.ProjectedVars) + switch { + case err != nil: + return nil, fmt.Errorf("cannot compute %q bind name for bind target: %w", rule.BindType, err) + case !valid: + return nil, fmt.Errorf("computed %q bind name for bind target is invalid: %q", rule.BindType, bindName) + } + switch rule.BindType { case structs.BindingRuleBindTypeService: - bindName, err := computeBindName(rule.BindName, verifiedIdentity.ProjectedVars, acl.IsValidServiceIdentityName) - if err != nil { - return nil, err - } bindings.ServiceIdentities = append(bindings.ServiceIdentities, &structs.ACLServiceIdentity{ ServiceName: bindName, }) case structs.BindingRuleBindTypeNode: - bindName, err := computeBindName(rule.BindName, verifiedIdentity.ProjectedVars, acl.IsValidNodeIdentityName) - if err != nil { - return nil, err - } bindings.NodeIdentities = append(bindings.NodeIdentities, &structs.ACLNodeIdentity{ NodeName: bindName, Datacenter: b.datacenter, }) - case structs.BindingRuleBindTypeTemplatedPolicy: - templatedPolicy, err := generateTemplatedPolicies(rule.BindName, rule.BindVars, verifiedIdentity.ProjectedVars) - if err != nil { - return nil, err - } - bindings.TemplatedPolicies = append(bindings.TemplatedPolicies, templatedPolicy) - - case structs.BindingRuleBindTypePolicy: - bindName, err := computeBindName(rule.BindName, verifiedIdentity.ProjectedVars, acl.IsValidRoleName) - if err != nil { - return nil, err - } - - _, policy, err := b.store.ACLPolicyGetByName(nil, bindName, &bindings.EnterpriseMeta) - if err != nil { - return nil, err - } - - if policy != nil { - bindings.Policies = append(bindings.Policies, structs.ACLTokenPolicyLink{ - ID: policy.ID, - Name: policy.Name, - }) - } - case structs.BindingRuleBindTypeRole: - bindName, err := computeBindName(rule.BindName, verifiedIdentity.ProjectedVars, acl.IsValidRoleName) - if err != nil { - return nil, err - } - _, role, err := b.store.ACLRoleGetByName(nil, bindName, &bindings.EnterpriseMeta) if err != nil { return nil, err @@ -162,11 +126,11 @@ func (b *Binder) Bind(authMethod *structs.ACLAuthMethod, verifiedIdentity *authm return &bindings, nil } -// IsValidBindingRule returns whether the given BindName and/or BindVars template produces valid +// IsValidBindName returns whether the given BindName template produces valid // results when interpolating the auth method's available variables. 
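Editor's note: in the patched Bind method above, each matching rule's bind name is computed once and a switch on the rule's BindType then turns it into a service identity, node identity, or role link, failing fast if the name cannot be computed or is invalid. A standalone sketch of that fan-out; the types and the trivial resolveName stub are illustrative stand-ins, not Consul's structs or helpers:

    package main

    import "fmt"

    type rule struct{ BindType, BindName string }

    type bindings struct {
        ServiceNames []string
        NodeNames    []string
        RoleNames    []string
        Datacenter   string
    }

    // resolveName is a trivial stand-in for the real name computation: it
    // treats every non-empty name as already interpolated and valid.
    func resolveName(bindType, bindName string) (string, bool, error) {
        return bindName, bindName != "", nil
    }

    // bind fans matching rules out into the three identity kinds, failing
    // fast on an uncomputable or invalid name, as the patched method does.
    func bind(rules []rule, dc string) (*bindings, error) {
        out := &bindings{Datacenter: dc}
        for _, r := range rules {
            name, valid, err := resolveName(r.BindType, r.BindName)
            switch {
            case err != nil:
                return nil, fmt.Errorf("cannot compute %q bind name: %w", r.BindType, err)
            case !valid:
                return nil, fmt.Errorf("computed %q bind name is invalid: %q", r.BindType, name)
            }
            switch r.BindType {
            case "service":
                out.ServiceNames = append(out.ServiceNames, name)
            case "node":
                out.NodeNames = append(out.NodeNames, name)
            case "role":
                out.RoleNames = append(out.RoleNames, name)
            default:
                return nil, fmt.Errorf("unknown bind type %q", r.BindType)
            }
        }
        return out, nil
    }

    func main() {
        b, err := bind([]rule{{"service", "web"}, {"node", "node-1"}}, "dc1")
        fmt.Println(b, err)
    }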
-func IsValidBindingRule(bindType, bindName string, bindVars *structs.ACLTemplatedPolicyVariables, availableVariables []string) error { +func IsValidBindName(bindType, bindName string, availableVariables []string) (bool, error) { if bindType == "" || bindName == "" { - return errors.New("bindType and bindName must not be empty") + return false, nil } fakeVarMap := make(map[string]string) @@ -174,101 +138,38 @@ func IsValidBindingRule(bindType, bindName string, bindVars *structs.ACLTemplate fakeVarMap[v] = "fake" } - switch bindType { - case structs.BindingRuleBindTypeService: - if _, err := computeBindName(bindName, fakeVarMap, acl.IsValidServiceIdentityName); err != nil { - return fmt.Errorf("failed to validate bindType %q: %w", bindType, err) - } - - case structs.BindingRuleBindTypeNode: - if _, err := computeBindName(bindName, fakeVarMap, acl.IsValidNodeIdentityName); err != nil { - return fmt.Errorf("failed to validate bindType %q: %w", bindType, err) - } - - case structs.BindingRuleBindTypeTemplatedPolicy: - // If user-defined templated policies are supported in the future, - // we will need to lookup state to ensure a template exists for given - // bindName. A possible solution is to rip out the check for templated - // policy into its own step which has access to the state store. - if _, err := generateTemplatedPolicies(bindName, bindVars, fakeVarMap); err != nil { - return fmt.Errorf("failed to validate bindType %q: %w", bindType, err) - } - - case structs.BindingRuleBindTypeRole: - if _, err := computeBindName(bindName, fakeVarMap, acl.IsValidRoleName); err != nil { - return fmt.Errorf("failed to validate bindType %q: %w", bindType, err) - } - - case structs.BindingRuleBindTypePolicy: - if _, err := computeBindName(bindName, fakeVarMap, acl.IsValidPolicyName); err != nil { - return fmt.Errorf("failed to validate bindType %q: %w", bindType, err) - } - default: - return fmt.Errorf("invalid Binding Rule: unknown BindType %q", bindType) - } - - return nil -} - -// computeBindName interprets given HIL bindName with any given variables in projectedVars. -// validate (if not nil) will be called on the interpreted string. -func computeBindName(bindName string, projectedVars map[string]string, validate func(string) bool) (string, error) { - computed, err := template.InterpolateHIL(bindName, projectedVars, true) + _, valid, err := computeBindName(bindType, bindName, fakeVarMap) if err != nil { - return "", fmt.Errorf("error interpreting template: %w", err) - } - if validate != nil && !validate(computed) { - return "", fmt.Errorf("invalid bind name: %q", computed) + return false, err } - return computed, nil + return valid, nil } -// generateTemplatedPolicies fetches a templated policy by bindName then attempts to interpret -// bindVars with any given variables in projectedVars. The resulting template is validated -// by the template's schema. -func generateTemplatedPolicies( - bindName string, - bindVars *structs.ACLTemplatedPolicyVariables, - projectedVars map[string]string, -) (*structs.ACLTemplatedPolicy, error) { - baseTemplate, ok := structs.GetACLTemplatedPolicyBase(bindName) - if !ok { - return nil, fmt.Errorf("Bind name for templated-policy bind type does not match existing template name: %s", bindName) - } - - computedBindVars, err := computeBindVars(bindVars, projectedVars) +// computeBindName processes the HIL for the provided bind type+name using the +// projected variables. +// +// - If the HIL is invalid ("", false, AN_ERROR) is returned. 
+// - If the computed name is not valid for the type ("INVALID_NAME", false, nil) is returned. +// - If the computed name is valid for the type ("VALID_NAME", true, nil) is returned. +func computeBindName(bindType, bindName string, projectedVars map[string]string) (string, bool, error) { + bindName, err := template.InterpolateHIL(bindName, projectedVars, true) if err != nil { - return nil, fmt.Errorf("failed to interpret templated policy variables: %w", err) - } - - out := &structs.ACLTemplatedPolicy{ - TemplateName: bindName, - TemplateVariables: computedBindVars, - TemplateID: baseTemplate.TemplateID, - } - - if err := out.ValidateTemplatedPolicy(baseTemplate.Schema); err != nil { - return nil, fmt.Errorf("templated policy failed validation: %w", err) + return "", false, err } - return out, nil -} - -func computeBindVars(bindVars *structs.ACLTemplatedPolicyVariables, projectedVars map[string]string) (*structs.ACLTemplatedPolicyVariables, error) { - if bindVars == nil { - return nil, nil - } - - out := &structs.ACLTemplatedPolicyVariables{} - if bindVars.Name != "" { - nameValue, err := template.InterpolateHIL(bindVars.Name, projectedVars, true) - if err != nil { - return nil, err - } - out.Name = nameValue + var valid bool + switch bindType { + case structs.BindingRuleBindTypeService: + valid = acl.IsValidServiceIdentityName(bindName) + case structs.BindingRuleBindTypeNode: + valid = acl.IsValidNodeIdentityName(bindName) + case structs.BindingRuleBindTypeRole: + valid = acl.IsValidRoleName(bindName) + default: + return "", false, fmt.Errorf("unknown binding rule bind type: %s", bindType) } - return out, nil + return bindName, valid, nil } // doesSelectorMatch checks that a single selector matches the provided vars. diff --git a/agent/consul/auth/binder_ce.go b/agent/consul/auth/binder_ce.go index 2b1a474a407cb..f6fa5e5e841c1 100644 --- a/agent/consul/auth/binder_ce.go +++ b/agent/consul/auth/binder_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package auth diff --git a/agent/consul/auth/binder_test.go b/agent/consul/auth/binder_test.go index 3220ff2b303c1..b86d4526dd006 100644 --- a/agent/consul/auth/binder_test.go +++ b/agent/consul/auth/binder_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
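Editor's note: computeBindName, as documented above, returns (name, valid, err): err means the template itself is malformed, while valid reports whether the interpolated result is acceptable for the bind type. A self-contained approximation of that contract; the regexp-based interpolation and validators below are simplified stand-ins for Consul's template.InterpolateHIL and acl.IsValid* helpers, chosen only to follow the broad rules the test table later in this patch exercises:

    package main

    import (
        "fmt"
        "regexp"
    )

    var (
        varRef = regexp.MustCompile(`\$\{\s*([A-Za-z0-9_.]+)\s*\}`)
        // Rough stand-ins for per-type name validators: service and node
        // names are lowercase DNS-ish, role names are looser.
        svcOrNodeName = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]*[a-z0-9])?$`)
        roleName      = regexp.MustCompile(`^[A-Za-z0-9\-_]{1,256}$`)
    )

    // computeBindName mirrors the (name, valid, err) contract described in
    // the patch: err reports a broken template, valid reports whether the
    // interpolated result is acceptable for the bind type.
    func computeBindName(bindType, bindName string, vars map[string]string) (string, bool, error) {
        // Simplified interpolation: every ${var} must resolve. This is NOT
        // Consul's HIL interpolator, just enough to show the contract.
        var missing error
        out := varRef.ReplaceAllStringFunc(bindName, func(m string) string {
            key := varRef.FindStringSubmatch(m)[1]
            v, ok := vars[key]
            if !ok {
                missing = fmt.Errorf("unknown variable %q", key)
            }
            return v
        })
        if missing != nil {
            return "", false, missing
        }

        var valid bool
        switch bindType {
        case "service", "node": // node rules are simplified to match service rules here
            valid = svcOrNodeName.MatchString(out)
        case "role":
            valid = roleName.MatchString(out)
        default:
            return "", false, fmt.Errorf("unknown binding rule bind type: %s", bindType)
        }
        return out, valid, nil
    }

    func main() {
        fmt.Println(computeBindName("service", "web-${name}", map[string]string{"name": "api"}))
        // -> web-api true <nil>
        fmt.Println(computeBindName("service", "BAD_${name}", map[string]string{"name": "api"}))
        // -> BAD_api false <nil> (computed, but not a valid service name)
    }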
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package auth @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" ) func TestBindings_None(t *testing.T) { @@ -28,79 +27,11 @@ func TestBindings_None(t *testing.T) { b = &Bindings{Roles: []structs.ACLTokenRoleLink{{ID: generateID(t)}}} require.False(t, b.None()) - b = &Bindings{Policies: []structs.ACLTokenPolicyLink{{ID: generateID(t)}}} - require.False(t, b.None()) - b = &Bindings{ServiceIdentities: []*structs.ACLServiceIdentity{{ServiceName: "web"}}} require.False(t, b.None()) b = &Bindings{NodeIdentities: []*structs.ACLNodeIdentity{{NodeName: "node-123"}}} require.False(t, b.None()) - - b = &Bindings{TemplatedPolicies: []*structs.ACLTemplatedPolicy{{TemplateName: api.ACLTemplatedPolicyDNSName}}} - require.False(t, b.None()) -} - -func TestBinder_Policy_Success(t *testing.T) { - store := testStateStore(t) - binder := &Binder{store: store} - - authMethod := &structs.ACLAuthMethod{ - Name: "test-auth-method", - Type: "testing", - } - require.NoError(t, store.ACLAuthMethodSet(0, authMethod)) - - targetPolicy := &structs.ACLPolicy{ - ID: generateID(t), - Name: "foo-policy", - } - require.NoError(t, store.ACLPolicySet(0, targetPolicy)) - - otherPolicy := &structs.ACLPolicy{ - ID: generateID(t), - Name: "not-my-policy", - } - require.NoError(t, store.ACLPolicySet(0, otherPolicy)) - - bindingRules := structs.ACLBindingRules{ - { - ID: generateID(t), - Selector: "role==engineer", - BindType: structs.BindingRuleBindTypePolicy, - BindName: "${editor}-policy", - AuthMethod: authMethod.Name, - }, - { - ID: generateID(t), - Selector: "role==engineer", - BindType: structs.BindingRuleBindTypePolicy, - BindName: "this-policy-does-not-exist", - AuthMethod: authMethod.Name, - }, - { - ID: generateID(t), - Selector: "language==js", - BindType: structs.BindingRuleBindTypePolicy, - BindName: otherPolicy.Name, - AuthMethod: authMethod.Name, - }, - } - require.NoError(t, store.ACLBindingRuleBatchSet(0, bindingRules)) - - result, err := binder.Bind(&structs.ACLAuthMethod{}, &authmethod.Identity{ - SelectableFields: map[string]string{ - "role": "engineer", - "language": "go", - }, - ProjectedVars: map[string]string{ - "editor": "foo", - }, - }) - require.NoError(t, err) - require.Equal(t, []structs.ACLTokenPolicyLink{ - {ID: targetPolicy.ID, Name: targetPolicy.Name}, - }, result.Policies) } func TestBinder_Roles_Success(t *testing.T) { @@ -188,33 +119,7 @@ func TestBinder_Roles_NameValidation(t *testing.T) { _, err := binder.Bind(&structs.ACLAuthMethod{}, &authmethod.Identity{}) require.Error(t, err) - require.Contains(t, err.Error(), "invalid bind name") -} - -func TestBinder_Policy_NameValidation(t *testing.T) { - store := testStateStore(t) - binder := &Binder{store: store} - - authMethod := &structs.ACLAuthMethod{ - Name: "test-auth-method", - Type: "testing", - } - require.NoError(t, store.ACLAuthMethodSet(0, authMethod)) - - bindingRules := structs.ACLBindingRules{ - { - ID: generateID(t), - Selector: "", - BindType: structs.BindingRuleBindTypePolicy, - BindName: "INVALID!", - AuthMethod: authMethod.Name, - }, - } - require.NoError(t, store.ACLBindingRuleBatchSet(0, bindingRules)) - - _, err := binder.Bind(&structs.ACLAuthMethod{}, &authmethod.Identity{}) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid bind name") + require.Contains(t, err.Error(), "bind name 
for bind target is invalid") } func TestBinder_ServiceIdentities_Success(t *testing.T) { @@ -282,7 +187,7 @@ func TestBinder_ServiceIdentities_NameValidation(t *testing.T) { _, err := binder.Bind(&structs.ACLAuthMethod{}, &authmethod.Identity{}) require.Error(t, err) - require.Contains(t, err.Error(), "invalid bind name") + require.Contains(t, err.Error(), "bind name for bind target is invalid") } func TestBinder_NodeIdentities_Success(t *testing.T) { @@ -350,105 +255,82 @@ func TestBinder_NodeIdentities_NameValidation(t *testing.T) { _, err := binder.Bind(&structs.ACLAuthMethod{}, &authmethod.Identity{}) require.Error(t, err) - require.Contains(t, err.Error(), "invalid bind name") + require.Contains(t, err.Error(), "bind name for bind target is invalid") } -func Test_IsValidBindingRule(t *testing.T) { +func Test_IsValidBindName(t *testing.T) { type testcase struct { name string bindType string bindName string - bindVars *structs.ACLTemplatedPolicyVariables fields string - err bool + valid bool // valid HIL, invalid contents + err bool // invalid HIL } for _, test := range []testcase{ {"no bind type", - "", "", nil, "", true}, + "", "", "", false, false}, {"bad bind type", - "invalid", "blah", nil, "", true}, + "invalid", "blah", "", false, true}, // valid HIL, invalid name {"empty", - "all", "", nil, "", true}, + "both", "", "", false, false}, {"just end", - "all", "}", nil, "", true}, + "both", "}", "", false, false}, {"var without start", - "all", " item }", nil, "item", true}, + "both", " item }", "item", false, false}, {"two vars missing second start", - "all", "before-${ item }after--more }", nil, "item,more", true}, + "both", "before-${ item }after--more }", "item,more", false, false}, // names for the two types are validated differently {"@ is disallowed", - "all", "bad@name", nil, "", true}, - {"leading dash", - "role", "-name", nil, "", false}, + "both", "bad@name", "", false, false}, {"leading dash", - "policy", "-name", nil, "", false}, + "role", "-name", "", true, false}, {"leading dash", - "service", "-name", nil, "", true}, + "service", "-name", "", false, false}, {"trailing dash", - "role", "name-", nil, "", false}, + "role", "name-", "", true, false}, {"trailing dash", - "policy", "name-", nil, "", false}, - {"trailing dash", - "service", "name-", nil, "", true}, + "service", "name-", "", false, false}, {"inner dash", - "all", "name-end", nil, "", false}, - {"upper case", - "role", "NAME", nil, "", false}, + "both", "name-end", "", true, false}, {"upper case", - "policy", "NAME", nil, "", false}, + "role", "NAME", "", true, false}, {"upper case", - "service", "NAME", nil, "", true}, + "service", "NAME", "", false, false}, // valid HIL, valid name {"no vars", - "all", "nothing", nil, "", false}, + "both", "nothing", "", true, false}, {"just var", - "all", "${item}", nil, "item", false}, + "both", "${item}", "item", true, false}, {"var in middle", - "all", "before-${item}after", nil, "item", false}, + "both", "before-${item}after", "item", true, false}, {"two vars", - "all", "before-${item}after-${more}", nil, "item,more", false}, + "both", "before-${item}after-${more}", "item,more", true, false}, // bad {"no bind name", - "all", "", nil, "", true}, + "both", "", "", false, false}, {"just start", - "all", "${", nil, "", true}, + "both", "${", "", false, true}, {"backwards", - "all", "}${", nil, "", true}, + "both", "}${", "", false, true}, {"no varname", - "all", "${}", nil, "", true}, + "both", "${}", "", false, true}, {"missing map key", - "all", "${item}", nil, "", true}, 
+ "both", "${item}", "", false, true}, {"var without end", - "all", "${ item ", nil, "item", true}, + "both", "${ item ", "item", false, true}, {"two vars missing first end", - "all", "before-${ item after-${ more }", nil, "item,more", true}, - - // bind type: templated policy - bad input - {"templated-policy missing bindvars", "templated-policy", "builtin/service", nil, "", true}, - {"templated-policy with unknown templated policy name", - "templated-policy", "builtin/service", &structs.ACLTemplatedPolicyVariables{Name: "before-${item}after-${more}"}, "", true}, - {"templated-policy with correct bindvars and unknown vars", - "templated-policy", "builtin/fake", &structs.ACLTemplatedPolicyVariables{Name: "test"}, "", true}, - {"templated-policy with correct bindvars but incorrect HIL", - "templated-policy", "builtin/service", &structs.ACLTemplatedPolicyVariables{Name: "before-${ item }after--more }"}, "", true}, - - // bind type: templated policy - good input - {"templated-policy with appropriate bindvars", - "templated-policy", "builtin/service", &structs.ACLTemplatedPolicyVariables{Name: "before-${item}after-${more}"}, "item,more", false}, + "both", "before-${ item after-${ more }", "item,more", false, true}, } { var cases []testcase - if test.bindType == "all" { + if test.bindType == "both" { test1 := test test1.bindType = "role" test2 := test test2.bindType = "service" - test3 := test - test3.bindType = "policy" - test4 := test - test4.bindType = "node" - cases = []testcase{test1, test2, test3, test4} + cases = []testcase{test1, test2} } else { cases = []testcase{test} } @@ -457,13 +339,18 @@ func Test_IsValidBindingRule(t *testing.T) { test := test t.Run(test.bindType+"--"+test.name, func(t *testing.T) { t.Parallel() - err := IsValidBindingRule( + valid, err := IsValidBindName( test.bindType, test.bindName, - test.bindVars, strings.Split(test.fields, ","), ) - require.Equal(t, test.err, err != nil) + if test.err { + require.NotNil(t, err) + require.False(t, valid) + } else { + require.NoError(t, err) + require.Equal(t, test.valid, valid) + } }) } } diff --git a/agent/consul/auth/login.go b/agent/consul/auth/login.go index 0b9b5eeac77ae..9592e5a841d6f 100644 --- a/agent/consul/auth/login.go +++ b/agent/consul/auth/login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package auth @@ -46,9 +46,7 @@ func (l *Login) TokenForVerifiedIdentity(identity *authmethod.Identity, authMeth ExpirationTTL: authMethod.MaxTokenTTL, ServiceIdentities: bindings.ServiceIdentities, NodeIdentities: bindings.NodeIdentities, - TemplatedPolicies: bindings.TemplatedPolicies, Roles: bindings.Roles, - Policies: bindings.Policies, EnterpriseMeta: bindings.EnterpriseMeta, } token.ACLAuthMethodEnterpriseMeta.FillWithEnterpriseMeta(&authMethod.EnterpriseMeta) diff --git a/agent/consul/auth/token_writer.go b/agent/consul/auth/token_writer.go index 0112d0387c56d..857a2e3d13213 100644 --- a/agent/consul/auth/token_writer.go +++ b/agent/consul/auth/token_writer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package auth @@ -309,12 +309,6 @@ func (w *TokenWriter) write(token, existing *structs.ACLToken, fromLogin bool) ( } token.NodeIdentities = nodeIdentities - templatedPolicies, err := w.normalizeTemplatedPolicies(token.TemplatedPolicies) - if err != nil { - return nil, err - } - token.TemplatedPolicies = templatedPolicies - if err := w.enterpriseValidation(token, existing); err != nil { return nil, err } @@ -448,32 +442,3 @@ func (w *TokenWriter) normalizeNodeIdentities(nodeIDs structs.ACLNodeIdentities) } return nodeIDs.Deduplicate(), nil } - -func (w *TokenWriter) normalizeTemplatedPolicies(templatedPolicies structs.ACLTemplatedPolicies) (structs.ACLTemplatedPolicies, error) { - if len(templatedPolicies) == 0 { - return templatedPolicies, nil - } - - finalPolicies := make(structs.ACLTemplatedPolicies, 0, len(templatedPolicies)) - for _, templatedPolicy := range templatedPolicies { - if templatedPolicy.TemplateName == "" { - return nil, errors.New("templated policy is missing the template name field on this token") - } - - tmp, ok := structs.GetACLTemplatedPolicyBase(templatedPolicy.TemplateName) - if !ok { - return nil, fmt.Errorf("no such ACL templated policy with Name %q", templatedPolicy.TemplateName) - } - - out := templatedPolicy.Clone() - out.TemplateID = tmp.TemplateID - - err := templatedPolicy.ValidateTemplatedPolicy(tmp.Schema) - if err != nil { - return nil, fmt.Errorf("validation error for templated policy %q: %w", templatedPolicy.TemplateName, err) - } - finalPolicies = append(finalPolicies, out) - } - - return finalPolicies.Deduplicate(), nil -} diff --git a/agent/consul/auth/token_writer_ce.go b/agent/consul/auth/token_writer_ce.go index fe8840138a823..b0ad9e833bb0e 100644 --- a/agent/consul/auth/token_writer_ce.go +++ b/agent/consul/auth/token_writer_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package auth diff --git a/agent/consul/auth/token_writer_test.go b/agent/consul/auth/token_writer_test.go index 3206476a504d9..51a2b3cc45a83 100644 --- a/agent/consul/auth/token_writer_test.go +++ b/agent/consul/auth/token_writer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package auth @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" ) func TestTokenWriter_Create_Validation(t *testing.T) { @@ -358,59 +357,6 @@ func TestTokenWriter_NodeIdentities(t *testing.T) { } } -func TestTokenWriter_TemplatedPolicies(t *testing.T) { - aclCache := &MockACLCache{} - aclCache.On("RemoveIdentityWithSecretToken", mock.Anything) - - store := testStateStore(t) - - writer := buildTokenWriter(store, aclCache) - - testCases := map[string]struct { - input []*structs.ACLTemplatedPolicy - output []*structs.ACLTemplatedPolicy - errorContains string - }{ - "missing templated policy name": { - input: []*structs.ACLTemplatedPolicy{{TemplateName: ""}}, - errorContains: "templated policy is missing the template name field on this token", - }, - "invalid templated policy name": { - input: []*structs.ACLTemplatedPolicy{{TemplateName: "faketemplate"}}, - errorContains: "no such ACL templated policy with Name \"faketemplate\"", - }, - "missing required template variable: name": { - input: []*structs.ACLTemplatedPolicy{{TemplateName: api.ACLTemplatedPolicyServiceName, Datacenters: []string{"dc1"}}}, - errorContains: "validation error for templated policy \"builtin/service\"", - }, - "duplicate templated policies are removed and ids are set": { - input: []*structs.ACLTemplatedPolicy{ - {TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "web"}}, - {TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "web"}}, - {TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "api"}}, - {TemplateName: api.ACLTemplatedPolicyNodeName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "nodename"}}, - }, - output: []*structs.ACLTemplatedPolicy{ - {TemplateID: structs.ACLTemplatedPolicyServiceID, TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "web"}}, - {TemplateID: structs.ACLTemplatedPolicyServiceID, TemplateName: api.ACLTemplatedPolicyServiceName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "api"}}, - {TemplateID: structs.ACLTemplatedPolicyNodeID, TemplateName: api.ACLTemplatedPolicyNodeName, TemplateVariables: &structs.ACLTemplatedPolicyVariables{Name: "nodename"}}, - }, - }, - } - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - updated, err := writer.Create(&structs.ACLToken{TemplatedPolicies: tc.input}, false) - if tc.errorContains == "" { - require.NoError(t, err) - require.ElementsMatch(t, tc.output, updated.TemplatedPolicies) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tc.errorContains) - } - }) - } -} - func TestTokenWriter_Create_Expiration(t *testing.T) { aclCache := &MockACLCache{} aclCache.On("RemoveIdentityWithSecretToken", mock.Anything) diff --git a/agent/consul/authmethod/authmethods.go b/agent/consul/authmethod/authmethods.go index d03e2b410cb4f..946fce927e697 100644 --- a/agent/consul/authmethod/authmethods.go +++ b/agent/consul/authmethod/authmethods.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package authmethod diff --git a/agent/consul/authmethod/authmethods_ce.go b/agent/consul/authmethod/authmethods_ce.go index 38b27b70115bc..0839b4aba5a74 100644 --- a/agent/consul/authmethod/authmethods_ce.go +++ b/agent/consul/authmethod/authmethods_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package authmethod diff --git a/agent/consul/authmethod/awsauth/aws.go b/agent/consul/authmethod/awsauth/aws.go index 3381a893fa5da..d2cd73482cde5 100644 --- a/agent/consul/authmethod/awsauth/aws.go +++ b/agent/consul/authmethod/awsauth/aws.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package awsauth diff --git a/agent/consul/authmethod/awsauth/aws_test.go b/agent/consul/authmethod/awsauth/aws_test.go index 279e4b3e46d48..7a894cc217878 100644 --- a/agent/consul/authmethod/awsauth/aws_test.go +++ b/agent/consul/authmethod/awsauth/aws_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package awsauth diff --git a/agent/consul/authmethod/kubeauth/k8s.go b/agent/consul/authmethod/kubeauth/k8s.go index 274dd2ec9d03a..f71157cbeccb7 100644 --- a/agent/consul/authmethod/kubeauth/k8s.go +++ b/agent/consul/authmethod/kubeauth/k8s.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package kubeauth diff --git a/agent/consul/authmethod/kubeauth/k8s_ce.go b/agent/consul/authmethod/kubeauth/k8s_ce.go index b180836948cf3..b2b7e8a2d8715 100644 --- a/agent/consul/authmethod/kubeauth/k8s_ce.go +++ b/agent/consul/authmethod/kubeauth/k8s_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package kubeauth diff --git a/agent/consul/authmethod/kubeauth/k8s_test.go b/agent/consul/authmethod/kubeauth/k8s_test.go index 48ef6e61c4839..95decce11597c 100644 --- a/agent/consul/authmethod/kubeauth/k8s_test.go +++ b/agent/consul/authmethod/kubeauth/k8s_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package kubeauth diff --git a/agent/consul/authmethod/kubeauth/testing.go b/agent/consul/authmethod/kubeauth/testing.go index 38b7d9c330a3f..e5538bb90998f 100644 --- a/agent/consul/authmethod/kubeauth/testing.go +++ b/agent/consul/authmethod/kubeauth/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package kubeauth diff --git a/agent/consul/authmethod/ssoauth/sso.go b/agent/consul/authmethod/ssoauth/sso.go index 398f5689799b3..6215c0eafe719 100644 --- a/agent/consul/authmethod/ssoauth/sso.go +++ b/agent/consul/authmethod/ssoauth/sso.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ssoauth diff --git a/agent/consul/authmethod/ssoauth/sso_ce.go b/agent/consul/authmethod/ssoauth/sso_ce.go index 504329d20089c..74e3be3082ccb 100644 --- a/agent/consul/authmethod/ssoauth/sso_ce.go +++ b/agent/consul/authmethod/ssoauth/sso_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package ssoauth diff --git a/agent/consul/authmethod/ssoauth/sso_test.go b/agent/consul/authmethod/ssoauth/sso_test.go index 357612fad6894..840e37b86fd46 100644 --- a/agent/consul/authmethod/ssoauth/sso_test.go +++ b/agent/consul/authmethod/ssoauth/sso_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package ssoauth diff --git a/agent/consul/authmethod/testauth/testing.go b/agent/consul/authmethod/testauth/testing.go index ead9ae081a7ea..9f6c85ae23d5e 100644 --- a/agent/consul/authmethod/testauth/testing.go +++ b/agent/consul/authmethod/testauth/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testauth diff --git a/agent/consul/authmethod/testauth/testing_ce.go b/agent/consul/authmethod/testauth/testing_ce.go index 460f30fe4f798..f4b909b4b8127 100644 --- a/agent/consul/authmethod/testauth/testing_ce.go +++ b/agent/consul/authmethod/testauth/testing_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package testauth diff --git a/agent/consul/authmethod/testing.go b/agent/consul/authmethod/testing.go index 0f43e5e5201fc..933082a5b4295 100644 --- a/agent/consul/authmethod/testing.go +++ b/agent/consul/authmethod/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package authmethod diff --git a/agent/consul/auto_config_backend.go b/agent/consul/auto_config_backend.go index 0aaccfa35d99b..78413fe0121c1 100644 --- a/agent/consul/auto_config_backend.go +++ b/agent/consul/auto_config_backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -34,7 +34,7 @@ func (b autoConfigBackend) GetCARoots() (*structs.IndexedCARoots, error) { } // DatacenterJoinAddresses will return all the strings suitable for usage in -// retry join operations to connect to the LAN or LAN segment gossip pool. +// retry join operations to connect to the the LAN or LAN segment gossip pool. func (b autoConfigBackend) DatacenterJoinAddresses(partition, segment string) ([]string, error) { members, err := b.Server.LANMembers(LANMemberFilter{ Segment: segment, diff --git a/agent/consul/auto_config_backend_test.go b/agent/consul/auto_config_backend_test.go index 00c0dc10d80d7..6a4a202fceb8e 100644 --- a/agent/consul/auto_config_backend_test.go +++ b/agent/consul/auto_config_backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/auto_config_endpoint.go b/agent/consul/auto_config_endpoint.go index 6c4a1b4f71f4b..808aa63304dcd 100644 --- a/agent/consul/auto_config_endpoint.go +++ b/agent/consul/auto_config_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -11,12 +11,12 @@ import ( "regexp" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/dnsutil" bexpr "github.com/hashicorp/go-bexpr" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" + "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib/template" "github.com/hashicorp/consul/proto/private/pbautoconf" @@ -80,7 +80,7 @@ func (a *jwtAuthorizer) Authorize(req *pbautoconf.AutoConfigRequest) (AutoConfig if invalidSegmentName.MatchString(req.Segment) { return AutoConfigOptions{}, fmt.Errorf("Invalid request field. %v = `%v`", "segment", req.Segment) } - if req.Partition != "" && !dnsutil.IsValidLabel(req.Partition) { + if req.Partition != "" && !dns.IsValidLabel(req.Partition) { return AutoConfigOptions{}, fmt.Errorf("Invalid request field. %v = `%v`", "partition", req.Partition) } diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index 39c4f3a7a5efa..a3f485ee60c2a 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/auto_encrypt_endpoint.go b/agent/consul/auto_encrypt_endpoint.go index dbd39355cfbf3..b893e783215ad 100644 --- a/agent/consul/auto_encrypt_endpoint.go +++ b/agent/consul/auto_encrypt_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/auto_encrypt_endpoint_test.go b/agent/consul/auto_encrypt_endpoint_test.go index f3b2320116340..d8124f9fb3623 100644 --- a/agent/consul/auto_encrypt_endpoint_test.go +++ b/agent/consul/auto_encrypt_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/autopilot.go b/agent/consul/autopilot.go index 70391f6b99107..f682ffed6f164 100644 --- a/agent/consul/autopilot.go +++ b/agent/consul/autopilot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/autopilot_ce.go b/agent/consul/autopilot_ce.go index 4a95e69e23fed..92f9b4ccae41d 100644 --- a/agent/consul/autopilot_ce.go +++ b/agent/consul/autopilot_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/autopilot_test.go b/agent/consul/autopilot_test.go index 8d1d214b17104..4429340eda5a8 100644 --- a/agent/consul/autopilot_test.go +++ b/agent/consul/autopilot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/autopilotevents/ready_servers_events.go b/agent/consul/autopilotevents/ready_servers_events.go index 16e064aa7905d..404276f3ec2d5 100644 --- a/agent/consul/autopilotevents/ready_servers_events.go +++ b/agent/consul/autopilotevents/ready_servers_events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
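Editor's note: the auto-config authorizer hunk above rejects requests whose partition is not a valid DNS label (the backport calls dns.IsValidLabel from agent/dns instead of internal/dnsutil, but the shape of the check is unchanged). A hedged, standalone approximation of that check; the regexp follows the usual RFC 1123 label rules and is not Consul's exact implementation:

    package main

    import (
        "fmt"
        "regexp"
    )

    // dnsLabel approximates an RFC 1123 label: 1-63 characters, lowercase
    // alphanumerics and hyphens, starting and ending with an alphanumeric.
    var dnsLabel = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`)

    // validatePartition mirrors the authorizer's check: an empty partition
    // is allowed (it means "default"); anything else must be a valid label.
    func validatePartition(partition string) error {
        if partition != "" && !dnsLabel.MatchString(partition) {
            return fmt.Errorf("Invalid request field. partition = `%v`", partition)
        }
        return nil
    }

    func main() {
        fmt.Println(validatePartition(""))         // <nil>
        fmt.Println(validatePartition("team-a"))   // <nil>
        fmt.Println(validatePartition("Bad_Name")) // error
    }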
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autopilotevents diff --git a/agent/consul/autopilotevents/ready_servers_events_test.go b/agent/consul/autopilotevents/ready_servers_events_test.go index 16d78b52e3b5f..994020c290c0d 100644 --- a/agent/consul/autopilotevents/ready_servers_events_test.go +++ b/agent/consul/autopilotevents/ready_servers_events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package autopilotevents diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 36426afe253a2..c53c413567391 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 628ad83ae4db0..192a3d6d7d275 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/client.go b/agent/consul/client.go index f47a2871d130b..1c7ac12177600 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -25,7 +25,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" ) @@ -94,9 +93,6 @@ type Client struct { EnterpriseClient tlsConfigurator *tlsutil.Configurator - - // resourceServiceClient is a client for the gRPC Resource Service. - resourceServiceClient pbresource.ResourceServiceClient } // NewClient creates and returns a Client @@ -107,7 +103,7 @@ func NewClient(config *Config, deps Deps) (*Client, error) { if config.DataDir == "" { return nil, fmt.Errorf("Config must provide a DataDir") } - if err := config.CheckEnumStrings(); err != nil { + if err := config.CheckACL(); err != nil { return nil, err } @@ -155,13 +151,6 @@ func NewClient(config *Config, deps Deps) (*Client, error) { } c.router = deps.Router - conn, err := deps.GRPCConnPool.ClientConn(deps.ConnPool.Datacenter) - if err != nil { - c.Shutdown() - return nil, fmt.Errorf("Failed to get gRPC client connection: %w", err) - } - c.resourceServiceClient = pbresource.NewResourceServiceClient(conn) - // Start LAN event handlers after the router is complete since the event // handlers depend on the router and the router depends on Serf. go c.lanEventHandler() @@ -464,7 +453,3 @@ func (c *Client) AgentEnterpriseMeta() *acl.EnterpriseMeta { func (c *Client) agentSegmentName() string { return c.config.Segment } - -func (c *Client) ResourceServiceClient() pbresource.ResourceServiceClient { - return c.resourceServiceClient -} diff --git a/agent/consul/client_serf.go b/agent/consul/client_serf.go index c92fdd1726c30..7d68b50395e49 100644 --- a/agent/consul/client_serf.go +++ b/agent/consul/client_serf.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index b308450800b8a..4b8f5c433d8e7 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -504,7 +504,7 @@ func newClient(t *testing.T, config *Config) *Client { return client } -func newTestResolverConfig(t testutil.TestingTB, suffix string, dc, agentType string) resolver.Config { +func newTestResolverConfig(t *testing.T, suffix string, dc, agentType string) resolver.Config { n := t.Name() s := strings.Replace(n, "/", "", -1) s = strings.Replace(s, "_", "", -1) @@ -515,7 +515,7 @@ func newTestResolverConfig(t testutil.TestingTB, suffix string, dc, agentType st } } -func newDefaultDeps(t testutil.TestingTB, c *Config) Deps { +func newDefaultDeps(t *testing.T, c *Config) Deps { t.Helper() logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ @@ -576,7 +576,6 @@ func newDefaultDeps(t testutil.TestingTB, c *Config) Deps { GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), XDSStreamLimiter: limiter.NewSessionLimiter(), - Registry: NewTypeRegistry(), } } diff --git a/agent/consul/cluster_test.go b/agent/consul/cluster_test.go index 7cc266908198c..925d32a610692 100644 --- a/agent/consul/cluster_test.go +++ b/agent/consul/cluster_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/config.go b/agent/consul/config.go index a5ab21f7311aa..eef4bc4376f4e 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/consul/agent/checks" consulrate "github.com/hashicorp/consul/agent/consul/rate" - hcpconfig "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/structs" libserf "github.com/hashicorp/consul/lib/serf" "github.com/hashicorp/consul/tlsutil" @@ -41,9 +40,8 @@ const ( // LogStoreBackend* are well-known string values used to configure different // log store backends. - LogStoreBackendDefault = "default" - LogStoreBackendBoltDB = "boltdb" - LogStoreBackendWAL = "wal" + LogStoreBackendBoltDB = "boltdb" + LogStoreBackendWAL = "wal" ) var ( @@ -411,13 +409,6 @@ type Config struct { // datacenters should exclusively traverse mesh gateways. ConnectMeshGatewayWANFederationEnabled bool - // DefaultIntentionPolicy is used to define a default intention action for all - // sources and destinations. Possible values are "allow", "deny", or "" (blank). - // For compatibility, falls back to ACLResolverSettings.ACLDefaultPolicy (which - // itself has a default of "allow") if left blank. Future versions of Consul - // will default this field to "deny" to be secure by default. - DefaultIntentionPolicy string - // DisableFederationStateAntiEntropy solely exists for use in unit tests to // disable a background routine. 
DisableFederationStateAntiEntropy bool @@ -450,7 +441,7 @@ type Config struct { Locality *structs.Locality - Cloud hcpconfig.CloudConfig + Cloud CloudConfig Reporting Reporting @@ -477,17 +468,21 @@ func (c *Config) CheckProtocolVersion() error { return nil } -// CheckEnumStrings validates string configuration which must be specific values. -func (c *Config) CheckEnumStrings() error { - if err := c.ACLResolverSettings.CheckACLs(); err != nil { - return err +// CheckACL validates the ACL configuration. +// TODO: move this to ACLResolverSettings +func (c *Config) CheckACL() error { + switch c.ACLResolverSettings.ACLDefaultPolicy { + case "allow": + case "deny": + default: + return fmt.Errorf("Unsupported default ACL policy: %s", c.ACLResolverSettings.ACLDefaultPolicy) } - switch c.DefaultIntentionPolicy { - case structs.IntentionDefaultPolicyAllow: - case structs.IntentionDefaultPolicyDeny: - case "": + switch c.ACLResolverSettings.ACLDownPolicy { + case "allow": + case "deny": + case "async-cache", "extend-cache": default: - return fmt.Errorf("Unsupported default intention policy: %s", c.DefaultIntentionPolicy) + return fmt.Errorf("Unsupported down ACL policy: %s", c.ACLResolverSettings.ACLDownPolicy) } return nil } diff --git a/agent/consul/config_ce.go b/agent/consul/config_ce.go index b65a518615086..e91d0981e86ee 100644 --- a/agent/consul/config_ce.go +++ b/agent/consul/config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/testing/deployer/topology/generate.go b/agent/consul/config_cloud.go similarity index 51% rename from testing/deployer/topology/generate.go rename to agent/consul/config_cloud.go index f9d6e26c3acbc..5b62574c811b4 100644 --- a/testing/deployer/topology/generate.go +++ b/agent/consul/config_cloud.go @@ -1,6 +1,8 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -package topology +package consul -//go:generate ../update-latest-versions.sh +type CloudConfig struct { + ManagementToken string +} diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index a78859c35058e..4108eb20b95c6 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 49a10dce21797..7dc7632fade71 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/config_replication.go b/agent/consul/config_replication.go index f3e5635007aa1..a742d8fad8bb1 100644 --- a/agent/consul/config_replication.go +++ b/agent/consul/config_replication.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/config_replication_test.go b/agent/consul/config_replication_test.go index e2c4fbee8d8a1..71798dda0a329 100644 --- a/agent/consul/config_replication_test.go +++ b/agent/consul/config_replication_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
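Editor's note: the CheckACL hunk above restricts ACLDefaultPolicy to allow/deny and ACLDownPolicy to allow/deny/async-cache/extend-cache, and the NewClient hunk earlier in this patch calls it before constructing the client. A minimal standalone sketch of the same switch-based enum validation, with hypothetical field names rather than Consul's Config type:

    package main

    import "fmt"

    type aclSettings struct {
        DefaultPolicy string
        DownPolicy    string
    }

    // checkACL validates enum-style string fields the way the patched
    // Config.CheckACL does: an explicit switch per field, erroring on any
    // value outside the known set.
    func checkACL(s aclSettings) error {
        switch s.DefaultPolicy {
        case "allow", "deny":
        default:
            return fmt.Errorf("Unsupported default ACL policy: %s", s.DefaultPolicy)
        }
        switch s.DownPolicy {
        case "allow", "deny", "async-cache", "extend-cache":
        default:
            return fmt.Errorf("Unsupported down ACL policy: %s", s.DownPolicy)
        }
        return nil
    }

    func main() {
        fmt.Println(checkACL(aclSettings{DefaultPolicy: "allow", DownPolicy: "extend-cache"})) // <nil>
        fmt.Println(checkACL(aclSettings{DefaultPolicy: "permit", DownPolicy: "deny"}))        // error
    }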
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/config_test.go b/agent/consul/config_test.go index 8e61b8fe96806..e2706e00b4065 100644 --- a/agent/consul/config_test.go +++ b/agent/consul/config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/configentry_backend.go b/agent/consul/configentry_backend.go deleted file mode 100644 index 2a572fcb68d80..0000000000000 --- a/agent/consul/configentry_backend.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/grpc-external/services/configentry" -) - -type ConfigEntryBackend struct { - srv *Server -} - -var _ configentry.Backend = (*ConfigEntryBackend)(nil) - -// NewConfigEntryBackend returns a configentry.Backend implementation that is bound to the given server. -func NewConfigEntryBackend(srv *Server) *ConfigEntryBackend { - return &ConfigEntryBackend{ - srv: srv, - } -} - -func (b *ConfigEntryBackend) EnterpriseCheckPartitions(partition string) error { - return b.enterpriseCheckPartitions(partition) -} - -func (b *ConfigEntryBackend) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) { - return b.srv.ResolveTokenAndDefaultMeta(token, entMeta, authzCtx) -} diff --git a/agent/consul/configentry_backend_ce.go b/agent/consul/configentry_backend_ce.go deleted file mode 100644 index 082f64532d42a..0000000000000 --- a/agent/consul/configentry_backend_ce.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package consul - -import ( - "fmt" - "strings" -) - -func (b *ConfigEntryBackend) enterpriseCheckPartitions(partition string) error { - if partition == "" || strings.EqualFold(partition, "default") { - return nil - } - return fmt.Errorf("Partitions are a Consul Enterprise feature") -} diff --git a/agent/consul/configentry_backend_ce_test.go b/agent/consul/configentry_backend_ce_test.go deleted file mode 100644 index f8a945cbfdf45..0000000000000 --- a/agent/consul/configentry_backend_ce_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package consul - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - gogrpc "google.golang.org/grpc" - - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/testrpc" -) - -func TestConfigEntryBackend_RejectsPartition(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - - _, s1 := testServerWithConfig(t, func(c *Config) { - c.GRPCTLSPort = freeport.GetOne(t) - }) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // make a grpc client to dial s1 directly - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - t.Cleanup(cancel) - - conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), - //nolint:staticcheck - gogrpc.WithInsecure(), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - configEntryClient := pbconfigentry.NewConfigEntryServiceClient(conn) - - req := pbconfigentry.GetResolvedExportedServicesRequest{ - Partition: "test", - } - _, err = configEntryClient.GetResolvedExportedServices(ctx, &req) - require.Error(t, err) - require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") -} - -func TestConfigEntryBackend_IgnoresDefaultPartition(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - - _, s1 := testServerWithConfig(t, func(c *Config) { - c.GRPCTLSPort = freeport.GetOne(t) - }) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // make a grpc client to dial s1 directly - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - t.Cleanup(cancel) - - conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), - //nolint:staticcheck - gogrpc.WithInsecure(), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - configEntryClient := pbconfigentry.NewConfigEntryServiceClient(conn) - - req := pbconfigentry.GetResolvedExportedServicesRequest{ - Partition: "DeFaUlT", - } - _, err = configEntryClient.GetResolvedExportedServices(ctx, &req) - require.NoError(t, err) -} diff --git a/agent/consul/configentry_backend_test.go b/agent/consul/configentry_backend_test.go deleted file mode 100644 index 30c8346e27c0a..0000000000000 --- a/agent/consul/configentry_backend_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "context" - "testing" - "time" - - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/testrpc" - "github.com/stretchr/testify/require" - gogrpc "google.golang.org/grpc" -) - -func TestConfigEntryBackend_EmptyPartition(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - - _, s1 := testServerWithConfig(t, func(c *Config) { - c.GRPCTLSPort = freeport.GetOne(t) - }) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // make a grpc client to dial s1 directly - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - t.Cleanup(cancel) - - conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), - gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), - //nolint:staticcheck - gogrpc.WithInsecure(), - gogrpc.WithBlock()) - require.NoError(t, err) - t.Cleanup(func() { conn.Close() }) - - configEntryClient := pbconfigentry.NewConfigEntryServiceClient(conn) - - req := pbconfigentry.GetResolvedExportedServicesRequest{ - Partition: "", - } - _, err = configEntryClient.GetResolvedExportedServices(ctx, &req) - require.NoError(t, err) -} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index c61ee6ded900e..771eae2464b95 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -1,9 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul import ( + "errors" "fmt" "time" @@ -25,11 +26,10 @@ var ( // variable points to. Clients need to compare using `err.Error() == // consul.ErrRateLimited.Error()` which is very sad. Short of replacing our // RPC mechanism it's hard to know how to make that much better though. - - ErrConnectNotEnabled = structs.ErrConnectNotEnabled - ErrRateLimited = structs.ErrRateLimited - ErrNotPrimaryDatacenter = structs.ErrNotPrimaryDatacenter - ErrStateReadOnly = structs.ErrStateReadOnly + ErrConnectNotEnabled = errors.New("Connect must be enabled in order to use this endpoint") + ErrRateLimited = errors.New("Rate limit reached, try again later") // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). + ErrNotPrimaryDatacenter = errors.New("not the primary datacenter") + ErrStateReadOnly = errors.New("CA Provider State is read-only") ) const ( diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 587ed42b5a10e..3911db1923109 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/context.go b/agent/consul/context.go index d85124f74881a..7de4157d8f401 100644 --- a/agent/consul/context.go +++ b/agent/consul/context.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/context_test.go b/agent/consul/context_test.go index 42e6feb744868..264dcdd988611 100644 --- a/agent/consul/context_test.go +++ b/agent/consul/context_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
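// Illustrative sketch, not part of this patch: the comment in the connect_ca_endpoint.go
// hunk above notes that error identity is lost across Consul's net/rpc boundary, so
// callers compare messages instead (err.Error() == consul.ErrRateLimited.Error()).
// A minimal, self-contained demonstration of why that comparison still works when
// errors.Is does not (ErrRateLimited here mirrors the variable declared in that hunk):
package main

import (
	"errors"
	"fmt"
)

var ErrRateLimited = errors.New("Rate limit reached, try again later")

// simulateRPC returns a fresh error carrying only the message text, which is
// roughly what a client sees after the error has crossed an RPC round trip.
func simulateRPC() error {
	return errors.New(ErrRateLimited.Error())
}

func main() {
	err := simulateRPC()
	fmt.Println(errors.Is(err, ErrRateLimited))        // false: identity did not survive transit
	fmt.Println(err.Error() == ErrRateLimited.Error()) // true: message comparison still matches
}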
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/controller/controller.go b/agent/consul/controller/controller.go index 9f49b9cb91bb1..f8d6a50c7f827 100644 --- a/agent/consul/controller/controller.go +++ b/agent/consul/controller/controller.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package controller @@ -11,15 +11,13 @@ import ( "sync/atomic" "time" - "golang.org/x/sync/errgroup" - "github.com/hashicorp/go-hclog" + "golang.org/x/sync/errgroup" "github.com/hashicorp/consul/agent/consul/controller/queue" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib/retry" ) // much of this is a re-implementation of @@ -218,41 +216,38 @@ func (c *controller) Run(ctx context.Context) error { for _, sub := range c.subscriptions { // store a reference for the closure sub := sub - // Fetch data from subscriptions repeatedly until the context is cancelled. c.group.Go(func() error { - defer c.logger.Debug("stopping controller subscription", "topic", sub.request.Topic) - lastFailTime := time.Now() - retryWaiter := &retry.Waiter{ - MinFailures: 1, - MinWait: 1 * time.Second, - MaxWait: 20 * time.Second, + var index uint64 + + subscription, err := c.publisher.Subscribe(sub.request) + if err != nil { + return err } - // Ensure the subscription is restarted when non-context errors happen. - // Stop if either the parent context or the group ctx is cancelled. - for c.groupCtx.Err() == nil { - c.logger.Debug("rewatching controller subscription", "topic", sub.request.Topic) - err := c.watchSubscription(ctx, sub) + defer subscription.Unsubscribe() + + for { + event, err := subscription.Next(ctx) switch { case errors.Is(err, context.Canceled): return nil - case errors.Is(err, stream.ErrSubForceClosed): - c.logger.Debug("controller subscription force closed", "topic", sub.request.Topic) case err != nil: - // Log the error and backoff wait. Do not return the error - // or else the subscriptions will stop being watched. - c.logger.Warn("error watching controller subscription", - "topic", sub.request.Topic, - "err", err) - // Reset the waiter if the last failure was more than 10 minutes ago. - // This simply prevents the backoff from being too aggressive. - if time.Now().After(lastFailTime.Add(10 * time.Minute)) { - retryWaiter.Reset() - } - lastFailTime = time.Now() - retryWaiter.Wait(c.groupCtx) + return err + } + + if event.IsFramingEvent() { + continue + } + + if event.Index <= index { + continue + } + + index = event.Index + + if err := c.processEvent(sub, event); err != nil { + return err } } - return nil }) } @@ -276,38 +271,6 @@ func (c *controller) Run(ctx context.Context) error { return nil } -// watchSubscription fetches events in a loop that stops on the first error. 
-func (c *controller) watchSubscription(ctx context.Context, sub subscription) error { - var index uint64 - subscription, err := c.publisher.Subscribe(sub.request) - if err != nil { - return err - } - defer subscription.Unsubscribe() - - for ctx.Err() == nil { - event, err := subscription.Next(ctx) - if err != nil { - return err - } - - if event.IsFramingEvent() { - continue - } - - if event.Index <= index { - continue - } - - index = event.Index - - if err := c.processEvent(sub, event); err != nil { - return err - } - } - return ctx.Err() -} - // AddTrigger allows for triggering a reconciliation request every time that the // triggering function returns, when the passed in context is canceled // the trigger must return @@ -417,7 +380,7 @@ func (c *controller) reconcileHandler(ctx context.Context, req Request) { var requeueAfter RequeueAfterError if errors.As(err, &requeueAfter) { c.work.Forget(req) - c.work.AddAfter(req, time.Duration(requeueAfter), false) + c.work.AddAfter(req, time.Duration(requeueAfter)) return } diff --git a/agent/consul/controller/controller_test.go b/agent/consul/controller/controller_test.go index 1d1002e8abede..97d110222b3e3 100644 --- a/agent/consul/controller/controller_test.go +++ b/agent/consul/controller/controller_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package controller diff --git a/agent/consul/controller/doc.go b/agent/consul/controller/doc.go index ba30d95a546fd..638eb5c5d9a2e 100644 --- a/agent/consul/controller/doc.go +++ b/agent/consul/controller/doc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package controller contains a re-implementation of the Kubernetes // [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) diff --git a/agent/consul/controller/queue/defer.go b/agent/consul/controller/queue/defer.go index 6ba5d09aa97dd..01666219c2919 100644 --- a/agent/consul/controller/queue/defer.go +++ b/agent/consul/controller/queue/defer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package queue @@ -18,10 +18,8 @@ type DeferQueue[T ItemType] interface { // Defer defers processing a Request until a given time. When // the timeout is hit, the request will be processed by the // callback given in the Process loop. If the given context - // is canceled, the item is not deferred. Override replaces - // any existing item regardless of the enqueue time when true. - Defer(ctx context.Context, item T, until time.Time, override bool) - + // is canceled, the item is not deferred. + Defer(ctx context.Context, item T, until time.Time) // Process processes all items in the defer queue with the // given callback, blocking until the given context is canceled. // Callers should only ever call Process once, likely in a @@ -34,9 +32,6 @@ type DeferQueue[T ItemType] interface { type deferredRequest[T ItemType] struct { enqueueAt time.Time item T - // override replaces any existing item when true regardless - // of the enqueue time - override bool // index holds the index for the given heap entry so that if // the entry is updated the heap can be re-sorted index int @@ -69,11 +64,10 @@ func NewDeferQueue[T ItemType](tick time.Duration) DeferQueue[T] { // Defer defers the given Request until the given time in the future. 
If the // passed in context is canceled before the Request is deferred, then this // immediately returns. -func (q *deferQueue[T]) Defer(ctx context.Context, item T, until time.Time, override bool) { +func (q *deferQueue[T]) Defer(ctx context.Context, item T, until time.Time) { entry := &deferredRequest[T]{ enqueueAt: until, item: item, - override: override, } select { @@ -85,9 +79,9 @@ func (q *deferQueue[T]) Defer(ctx context.Context, item T, until time.Time, over // deferEntry adds a deferred request to the priority queue func (q *deferQueue[T]) deferEntry(entry *deferredRequest[T]) { existing, exists := q.entries[entry.item.Key()] - // insert or update the item deferral time if exists { - if entry.override || entry.enqueueAt.Before(existing.enqueueAt) { + // insert or update the item deferral time + if existing.enqueueAt.After(entry.enqueueAt) { existing.enqueueAt = entry.enqueueAt heap.Fix(q.heap, existing.index) } diff --git a/agent/consul/controller/queue/queue.go b/agent/consul/controller/queue/queue.go index ed26ca6ff50f5..6d9f0a657125d 100644 --- a/agent/consul/controller/queue/queue.go +++ b/agent/consul/controller/queue/queue.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package queue @@ -27,9 +27,8 @@ type WorkQueue[T ItemType] interface { Get() (item T, shutdown bool) // Add immediately adds a Request to the work queue. Add(item T) - // AddAfter adds a Request to the work queue after a given amount of time - // with the option to override any existing Request that may be scheduled. - AddAfter(item T, duration time.Duration, override bool) + // AddAfter adds a Request to the work queue after a given amount of time. + AddAfter(item T, duration time.Duration) // AddRateLimited adds a Request to the work queue after the amount of time // specified by applying the queue's rate limiter. AddRateLimited(item T) @@ -42,10 +41,10 @@ type WorkQueue[T ItemType] interface { // queue implements a rate-limited work queue type queue[T ItemType] struct { - // queue holds an ordered list of non-deferred Requests needing to be processed + // queue holds an ordered list of Requests needing to be processed queue []T - // dirty holds the working set of all non-deferred Requests, whether they are being + // dirty holds the working set of all Requests, whether they are being // processed or not dirty map[string]struct{} // processing holds the set of current requests being processed @@ -146,9 +145,8 @@ func (q *queue[T]) Add(item T) { q.cond.Signal() } -// AddAfter adds a Request to the work queue after a given amount of time with -// the option to override any existing Request that may be scheduled. -func (q *queue[T]) AddAfter(item T, duration time.Duration, override bool) { +// AddAfter adds a Request to the work queue after a given amount of time. +func (q *queue[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.shuttingDown() { return @@ -160,13 +158,13 @@ func (q *queue[T]) AddAfter(item T, duration time.Duration, override bool) { return } - q.deferred.Defer(q.ctx, item, time.Now().Add(duration), override) + q.deferred.Defer(q.ctx, item, time.Now().Add(duration)) } // AddRateLimited adds the given Request to the queue after applying the // rate limiter to determine when the Request should next be processed. 
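// Illustrative sketch, not part of this patch: with the override flag removed,
// deferEntry above keeps whichever deadline is earlier when the same item is
// deferred again. A standalone model of that "earliest deadline wins" rule,
// using a plain string key in place of the queue's ItemType:
package main

import (
	"fmt"
	"time"
)

// deferUntil records the earliest requested deadline per key, mirroring the
// existing.enqueueAt.After(entry.enqueueAt) check restored in the hunk above.
func deferUntil(deadlines map[string]time.Time, key string, until time.Time) {
	if existing, ok := deadlines[key]; !ok || existing.After(until) {
		deadlines[key] = until
	}
}

func main() {
	now := time.Now()
	deadlines := map[string]time.Time{}
	deferUntil(deadlines, "reconcile/foo", now.Add(10*time.Second))
	deferUntil(deadlines, "reconcile/foo", now.Add(2*time.Second))  // earlier deadline wins
	deferUntil(deadlines, "reconcile/foo", now.Add(30*time.Second)) // later deadline is ignored
	fmt.Println(deadlines["reconcile/foo"].Equal(now.Add(2 * time.Second))) // true
}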
func (q *queue[T]) AddRateLimited(item T) { - q.AddAfter(item, q.ratelimiter.NextRetry(item), false) + q.AddAfter(item, q.ratelimiter.NextRetry(item)) } // Forget signals the queue to reset the rate-limiting for the given Request. diff --git a/agent/consul/controller/queue/rate.go b/agent/consul/controller/queue/rate.go index 615047fdeb39b..471601f85a270 100644 --- a/agent/consul/controller/queue/rate.go +++ b/agent/consul/controller/queue/rate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package queue diff --git a/agent/consul/controller/queue/rate_test.go b/agent/consul/controller/queue/rate_test.go index 166111d5c7508..40dc540138e2a 100644 --- a/agent/consul/controller/queue/rate_test.go +++ b/agent/consul/controller/queue/rate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package queue diff --git a/agent/consul/controller/queue_test.go b/agent/consul/controller/queue_test.go index b7aa08af650cd..11e1bc82b7626 100644 --- a/agent/consul/controller/queue_test.go +++ b/agent/consul/controller/queue_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package controller @@ -59,8 +59,8 @@ func (c *countingWorkQueue[T]) adds() uint64 { return atomic.LoadUint64(&c.addCounter) } -func (c *countingWorkQueue[T]) AddAfter(item T, duration time.Duration, override bool) { - c.inner.AddAfter(item, duration, override) +func (c *countingWorkQueue[T]) AddAfter(item T, duration time.Duration) { + c.inner.AddAfter(item, duration) atomic.AddUint64(&c.addAfterCounter, 1) } diff --git a/agent/consul/controller/reconciler.go b/agent/consul/controller/reconciler.go index fa948f81f6e62..dc4222508b57b 100644 --- a/agent/consul/controller/reconciler.go +++ b/agent/consul/controller/reconciler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package controller diff --git a/agent/consul/controller/reconciler_test.go b/agent/consul/controller/reconciler_test.go index 5229e7d1d474e..56ae022ea263b 100644 --- a/agent/consul/controller/reconciler_test.go +++ b/agent/consul/controller/reconciler_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package controller @@ -47,7 +47,6 @@ func (r *testReconciler) setResponse(err error) { func (r *testReconciler) step() { r.stepChan <- struct{}{} } - func (r *testReconciler) stepFor(duration time.Duration) { select { case r.stepChan <- struct{}{}: diff --git a/agent/consul/coordinate_endpoint.go b/agent/consul/coordinate_endpoint.go index f0e69332ee686..28bf63b0bfd80 100644 --- a/agent/consul/coordinate_endpoint.go +++ b/agent/consul/coordinate_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/coordinate_endpoint_test.go b/agent/consul/coordinate_endpoint_test.go index 1c693ba83bbfe..fbb3e13aa76d6 100644 --- a/agent/consul/coordinate_endpoint_test.go +++ b/agent/consul/coordinate_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/discovery_chain_endpoint.go b/agent/consul/discovery_chain_endpoint.go index c70cebb094e68..4d1f9959c96e8 100644 --- a/agent/consul/discovery_chain_endpoint.go +++ b/agent/consul/discovery_chain_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 62d90e9020af6..b0197f00d493d 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index c1cb3157a2427..4424a75f314c6 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain diff --git a/agent/consul/discoverychain/compile_ce.go b/agent/consul/discoverychain/compile_ce.go index d407e4cc1c891..d980c71f38f05 100644 --- a/agent/consul/discoverychain/compile_ce.go +++ b/agent/consul/discoverychain/compile_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package discoverychain diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 8c9c9dfec7f8e..e916f6811acb7 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain @@ -1772,6 +1772,9 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, Protocol: "grpc", + Config: map[string]interface{}{ + "protocol": "grpc", + }, MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, diff --git a/agent/consul/discoverychain/gateway.go b/agent/consul/discoverychain/gateway.go index 3435233aafed1..eac04ce8c011e 100644 --- a/agent/consul/discoverychain/gateway.go +++ b/agent/consul/discoverychain/gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain @@ -27,10 +27,9 @@ type GatewayChainSynthesizer struct { } type hostnameMatch struct { - match structs.HTTPMatch - filters structs.HTTPFilters - responseFilters structs.HTTPResponseFilters - services []structs.HTTPService + match structs.HTTPMatch + filters structs.HTTPFilters + services []structs.HTTPService } // NewGatewayChainSynthesizer creates a new GatewayChainSynthesizer for the @@ -88,10 +87,9 @@ func initHostMatches(hostname string, route *structs.HTTPRouteConfigEntry, curre // Add all matches for this rule to the list for this hostname for _, match := range rule.Matches { matches = append(matches, hostnameMatch{ - match: match, - filters: rule.Filters, - responseFilters: rule.ResponseFilters, - services: rule.Services, + match: match, + filters: rule.Filters, + services: rule.Services, }) } } @@ -233,10 +231,9 @@ func consolidateHTTPRoutes(matchesByHostname map[string][]hostnameMatch, listene // Add all rules for this hostname for _, rule := range rules { route.Rules = append(route.Rules, structs.HTTPRouteRule{ - Matches: []structs.HTTPMatch{rule.match}, - Filters: rule.filters, - ResponseFilters: rule.responseFilters, - Services: rule.services, + Matches: []structs.HTTPMatch{rule.match}, + Filters: rule.filters, + Services: rule.services, }) } diff --git a/agent/consul/discoverychain/gateway_httproute.go b/agent/consul/discoverychain/gateway_httproute.go index c4816e0274497..fcd2dc440259f 100644 --- a/agent/consul/discoverychain/gateway_httproute.go +++ b/agent/consul/discoverychain/gateway_httproute.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain @@ -79,8 +79,7 @@ func httpRouteToDiscoveryChain(route structs.HTTPRouteConfigEntry) (*structs.Ser var defaults []*structs.ServiceConfigEntry for idx, rule := range route.Rules { - requestModifier := httpRouteFiltersToServiceRouteHeaderModifier(rule.Filters.Headers) - responseModifier := httpRouteFiltersToServiceRouteHeaderModifier(rule.ResponseFilters.Headers) + modifier := httpRouteFiltersToServiceRouteHeaderModifier(rule.Filters.Headers) prefixRewrite := httpRouteFiltersToDestinationPrefixRewrite(rule.Filters.URLRewrite) var destination structs.ServiceRouteDestination @@ -91,29 +90,16 @@ func httpRouteToDiscoveryChain(route structs.HTTPRouteConfigEntry) (*structs.Ser if service.Filters.URLRewrite == nil { servicePrefixRewrite = prefixRewrite } - - // Merge service request header modifier(s) onto route rule modifiers - // Note: Removals for the same header may exist on the rule + the service and - // will result in idempotent duplicate values in the modifier w/ service coming last - serviceRequestModifier := httpRouteFiltersToServiceRouteHeaderModifier(service.Filters.Headers) - requestModifier.Add = mergeMaps(requestModifier.Add, serviceRequestModifier.Add) - requestModifier.Set = mergeMaps(requestModifier.Set, serviceRequestModifier.Set) - requestModifier.Remove = append(requestModifier.Remove, serviceRequestModifier.Remove...) 
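// Illustrative sketch, not part of this patch: the comments above describe how
// service-level header filters are layered onto the rule-level modifier, with the
// service applied last and duplicate Remove entries tolerated. A standalone
// approximation, assuming (as the removed test expectations suggest) that the
// right-hand map wins on conflicts; the package's unexported mergeMaps helper may
// differ in detail:
package main

import "fmt"

// overlay copies svc entries over rule entries, so service values take precedence.
func overlay(rule, svc map[string]string) map[string]string {
	out := make(map[string]string, len(rule)+len(svc))
	for k, v := range rule {
		out[k] = v
	}
	for k, v := range svc {
		out[k] = v
	}
	return out
}

func main() {
	rule := map[string]string{"x-shared": "rule", "x-rule-only": "present"}
	svc := map[string]string{"x-shared": "service", "x-svc-only": "present"}
	fmt.Println(overlay(rule, svc)["x-shared"]) // "service": the service filter is applied last

	// Removals are appended rather than deduplicated, so a header named by both
	// the rule and the service appears twice; the second removal is a no-op.
	remove := append([]string{"x-drop"}, []string{"x-drop"}...)
	fmt.Println(remove) // [x-drop x-drop]
}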
- - // Merge service response header modifier(s) onto route rule modifiers - // Note: Removals for the same header may exist on the rule + the service and - // will result in idempotent duplicate values in the modifier w/ service coming last - serviceResponseModifier := httpRouteFiltersToServiceRouteHeaderModifier(service.ResponseFilters.Headers) - responseModifier.Add = mergeMaps(responseModifier.Add, serviceResponseModifier.Add) - responseModifier.Set = mergeMaps(responseModifier.Set, serviceResponseModifier.Set) - responseModifier.Remove = append(responseModifier.Remove, serviceResponseModifier.Remove...) + serviceModifier := httpRouteFiltersToServiceRouteHeaderModifier(service.Filters.Headers) + modifier.Add = mergeMaps(modifier.Add, serviceModifier.Add) + modifier.Set = mergeMaps(modifier.Set, serviceModifier.Set) + modifier.Remove = append(modifier.Remove, serviceModifier.Remove...) destination.Service = service.Name destination.Namespace = service.NamespaceOrDefault() destination.Partition = service.PartitionOrDefault() destination.PrefixRewrite = servicePrefixRewrite - destination.RequestHeaders = requestModifier - destination.ResponseHeaders = responseModifier + destination.RequestHeaders = modifier // since we have already validated the protocol elsewhere, we // create a new service defaults here to make sure we pass validation @@ -129,8 +115,7 @@ func httpRouteToDiscoveryChain(route structs.HTTPRouteConfigEntry) (*structs.Ser destination.Namespace = route.NamespaceOrDefault() destination.Partition = route.PartitionOrDefault() destination.PrefixRewrite = prefixRewrite - destination.RequestHeaders = requestModifier - destination.ResponseHeaders = responseModifier + destination.RequestHeaders = modifier splitter := &structs.ServiceSplitterConfigEntry{ Kind: structs.ServiceSplitter, @@ -176,25 +161,6 @@ func httpRouteToDiscoveryChain(route structs.HTTPRouteConfigEntry) (*structs.Ser } } - if rule.Filters.RetryFilter != nil { - - destination.NumRetries = rule.Filters.RetryFilter.NumRetries - destination.RetryOnConnectFailure = rule.Filters.RetryFilter.RetryOnConnectFailure - - if len(rule.Filters.RetryFilter.RetryOn) > 0 { - destination.RetryOn = rule.Filters.RetryFilter.RetryOn - } - - if len(rule.Filters.RetryFilter.RetryOnStatusCodes) > 0 { - destination.RetryOnStatusCodes = rule.Filters.RetryFilter.RetryOnStatusCodes - } - } - - if rule.Filters.TimeoutFilter != nil { - destination.IdleTimeout = rule.Filters.TimeoutFilter.IdleTimeout - destination.RequestTimeout = rule.Filters.TimeoutFilter.RequestTimeout - } - // for each match rule a ServiceRoute is created for the service-router // if there are no rules a single route with the destination is set if len(rule.Matches) == 0 { @@ -207,7 +173,6 @@ func httpRouteToDiscoveryChain(route structs.HTTPRouteConfigEntry) (*structs.Ser Destination: &destination, }) } - } return router, splitters, defaults diff --git a/agent/consul/discoverychain/gateway_tcproute.go b/agent/consul/discoverychain/gateway_tcproute.go index 910fd517551c0..21afef3ec1846 100644 --- a/agent/consul/discoverychain/gateway_tcproute.go +++ b/agent/consul/discoverychain/gateway_tcproute.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain diff --git a/agent/consul/discoverychain/gateway_test.go b/agent/consul/discoverychain/gateway_test.go index 91931337c42e2..c5eb575c58ce5 100644 --- a/agent/consul/discoverychain/gateway_test.go +++ b/agent/consul/discoverychain/gateway_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain @@ -518,70 +518,8 @@ func TestGatewayChainSynthesizer_Synthesize(t *testing.T) { Kind: structs.HTTPRoute, Name: "http-route", Rules: []structs.HTTPRouteRule{{ - Filters: structs.HTTPFilters{ - Headers: []structs.HTTPHeaderFilter{ - { - Add: map[string]string{"add me to the rule request": "present"}, - Set: map[string]string{"set me on the rule request": "present"}, - Remove: []string{"remove me from the rule request"}, - }, - { - Add: map[string]string{"add me to the rule and service request": "rule"}, - Set: map[string]string{"set me on the rule and service request": "rule"}, - }, - { - Remove: []string{"remove me from the rule and service request"}, - }, - }, - }, - ResponseFilters: structs.HTTPResponseFilters{ - Headers: []structs.HTTPHeaderFilter{{ - Add: map[string]string{ - "add me to the rule response": "present", - "add me to the rule and service response": "rule", - }, - Set: map[string]string{ - "set me on the rule response": "present", - "set me on the rule and service response": "rule", - }, - Remove: []string{ - "remove me from the rule response", - "remove me from the rule and service response", - }, - }}, - }, Services: []structs.HTTPService{{ Name: "foo", - Filters: structs.HTTPFilters{ - Headers: []structs.HTTPHeaderFilter{ - { - Add: map[string]string{"add me to the service request": "present"}, - }, - { - Set: map[string]string{"set me on the service request": "present"}, - Remove: []string{"remove me from the service request"}, - }, - { - Add: map[string]string{"add me to the rule and service request": "service"}, - Set: map[string]string{"set me on the rule and service request": "service"}, - Remove: []string{"remove me from the rule and service request"}, - }, - }, - }, - ResponseFilters: structs.HTTPResponseFilters{ - Headers: []structs.HTTPHeaderFilter{ - { - Add: map[string]string{"add me to the service response": "present"}, - Set: map[string]string{"set me on the service response": "present"}, - Remove: []string{"remove me from the service response"}, - }, - { - Add: map[string]string{"add me to the rule and service response": "service"}, - Set: map[string]string{"set me on the rule and service response": "service"}, - Remove: []string{"remove me from the rule and service response"}, - }, - }, - }, }}, }}, }, @@ -619,40 +557,8 @@ func TestGatewayChainSynthesizer_Synthesize(t *testing.T) { Partition: "default", Namespace: "default", RequestHeaders: &structs.HTTPHeaderModifiers{ - Add: map[string]string{ - "add me to the rule request": "present", - "add me to the service request": "present", - "add me to the rule and service request": "service", - }, - Set: map[string]string{ - "set me on the rule request": "present", - "set me on the service request": "present", - "set me on the rule and service request": "service", - }, - Remove: []string{ - "remove me from the rule request", - "remove me from the rule and service request", - "remove me from the service request", - "remove me from the rule and service request", - }, - }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Add: 
map[string]string{ - "add me to the rule response": "present", - "add me to the service response": "present", - "add me to the rule and service response": "service", - }, - Set: map[string]string{ - "set me on the rule response": "present", - "set me on the service response": "present", - "set me on the rule and service response": "service", - }, - Remove: []string{ - "remove me from the rule response", - "remove me from the rule and service response", - "remove me from the service response", - "remove me from the rule and service response", - }, + Add: make(map[string]string), + Set: make(map[string]string), }, }, }, @@ -758,10 +664,6 @@ func TestGatewayChainSynthesizer_Synthesize(t *testing.T) { Add: make(map[string]string), Set: make(map[string]string), }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Add: make(map[string]string), - Set: make(map[string]string), - }, }, }, NextNode: "resolver:foo-2.default.default.dc2", @@ -950,10 +852,6 @@ func TestGatewayChainSynthesizer_ComplexChain(t *testing.T) { Add: make(map[string]string), Set: make(map[string]string), }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Add: make(map[string]string), - Set: make(map[string]string), - }, }, }, NextNode: "splitter:splitter-one.default.default", diff --git a/agent/consul/discoverychain/string_stack.go b/agent/consul/discoverychain/string_stack.go index d5f842f3dc615..e47743a3f3861 100644 --- a/agent/consul/discoverychain/string_stack.go +++ b/agent/consul/discoverychain/string_stack.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain diff --git a/agent/consul/discoverychain/string_stack_test.go b/agent/consul/discoverychain/string_stack_test.go index 9867b91795206..84f58203d43b2 100644 --- a/agent/consul/discoverychain/string_stack_test.go +++ b/agent/consul/discoverychain/string_stack_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain diff --git a/agent/consul/discoverychain/testing.go b/agent/consul/discoverychain/testing.go index 8992870f9c457..37a3bb4ec1162 100644 --- a/agent/consul/discoverychain/testing.go +++ b/agent/consul/discoverychain/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discoverychain diff --git a/agent/consul/enterprise_client_ce.go b/agent/consul/enterprise_client_ce.go index 585c84d91d3ac..3d432213bd848 100644 --- a/agent/consul/enterprise_client_ce.go +++ b/agent/consul/enterprise_client_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/enterprise_config_ce.go b/agent/consul/enterprise_config_ce.go index cb26252e59358..15af4ea1603e7 100644 --- a/agent/consul/enterprise_config_ce.go +++ b/agent/consul/enterprise_config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/enterprise_server_ce.go b/agent/consul/enterprise_server_ce.go index 61eb03cd1162e..8e56a8108cb35 100644 --- a/agent/consul/enterprise_server_ce.go +++ b/agent/consul/enterprise_server_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/enterprise_server_ce_test.go b/agent/consul/enterprise_server_ce_test.go index c039997a03f51..9bd3eb8c0c9b0 100644 --- a/agent/consul/enterprise_server_ce_test.go +++ b/agent/consul/enterprise_server_ce_test.go @@ -1,16 +1,18 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul import ( - "github.com/hashicorp/consul/sdk/testutil" + "testing" + hclog "github.com/hashicorp/go-hclog" ) -func newDefaultDepsEnterprise(t testutil.TestingTB, _ hclog.Logger, _ *Config) EnterpriseDeps { +func newDefaultDepsEnterprise(t *testing.T, _ hclog.Logger, _ *Config) EnterpriseDeps { t.Helper() return EnterpriseDeps{} } diff --git a/agent/consul/federation_state_endpoint.go b/agent/consul/federation_state_endpoint.go index 4afa481a397b6..db842e666d65d 100644 --- a/agent/consul/federation_state_endpoint.go +++ b/agent/consul/federation_state_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/federation_state_endpoint_test.go b/agent/consul/federation_state_endpoint_test.go index 2ada2fc17a469..977de1c9c193c 100644 --- a/agent/consul/federation_state_endpoint_test.go +++ b/agent/consul/federation_state_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/federation_state_replication.go b/agent/consul/federation_state_replication.go index 2f5a6dd150f31..f56c3c6089c73 100644 --- a/agent/consul/federation_state_replication.go +++ b/agent/consul/federation_state_replication.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/federation_state_replication_test.go b/agent/consul/federation_state_replication_test.go index 97a34b8dc8c16..5100e45926a13 100644 --- a/agent/consul/federation_state_replication_test.go +++ b/agent/consul/federation_state_replication_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/filter.go b/agent/consul/filter.go index 920b8e843676f..18643463690ee 100644 --- a/agent/consul/filter.go +++ b/agent/consul/filter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/filter_test.go b/agent/consul/filter_test.go index 1ca34b0ed0726..d8f6ba54c3232 100644 --- a/agent/consul/filter_test.go +++ b/agent/consul/filter_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/flood.go b/agent/consul/flood.go index e01b5ed97eb83..ee7dfbc1dd5ec 100644 --- a/agent/consul/flood.go +++ b/agent/consul/flood.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/fsm/commands_ce.go b/agent/consul/fsm/commands_ce.go index 77bc94de1a9ae..e9f9f66e3361e 100644 --- a/agent/consul/fsm/commands_ce.go +++ b/agent/consul/fsm/commands_ce.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm import ( - "errors" "fmt" "time" @@ -153,11 +152,7 @@ func init() { func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { defer metrics.MeasureSince([]string{"fsm", "register"}, time.Now()) var req structs.RegisterRequest - if err := decodeRegistrationReq(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted register request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } @@ -172,11 +167,7 @@ func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} { defer metrics.MeasureSince([]string{"fsm", "deregister"}, time.Now()) var req structs.DeregisterRequest - if err := decodeDeregistrationReq(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted deregister request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } @@ -204,11 +195,7 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} { func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} { var req structs.KVSRequest - if err := decodeKVSRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted KV request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "kvs"}, time.Now(), @@ -253,11 +240,7 @@ func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} { func (c *FSM) applySessionOperation(buf []byte, index uint64) interface{} { var req structs.SessionRequest - if err := decodeSessionRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted session request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "session"}, time.Now(), @@ -316,11 +299,7 @@ func (c *FSM) applyCoordinateBatchUpdate(buf []byte, index uint64) interface{} { // state store. 
func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{} { var req structs.PreparedQueryRequest - if err := decodePreparedQueryRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted prepared query request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } @@ -339,7 +318,7 @@ func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{} func (c *FSM) applyTxn(buf []byte, index uint64) interface{} { var req structs.TxnRequest - if err := decodeTxnRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSince([]string{"fsm", "txn"}, time.Now()) @@ -506,7 +485,7 @@ func (c *FSM) applyConnectCALeafOperation(buf []byte, index uint64) interface{} func (c *FSM) applyACLTokenSetOperation(buf []byte, index uint64) interface{} { var req structs.ACLTokenBatchSetRequest - if err := decodeACLTokenBatchSetRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "token"}, time.Now(), @@ -544,7 +523,7 @@ func (c *FSM) applyACLTokenBootstrap(buf []byte, index uint64) interface{} { func (c *FSM) applyACLPolicySetOperation(buf []byte, index uint64) interface{} { var req structs.ACLPolicyBatchSetRequest - if err := decodeACLPolicyBatchSetRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "policy"}, time.Now(), @@ -565,12 +544,10 @@ func (c *FSM) applyACLPolicyDeleteOperation(buf []byte, index uint64) interface{ } func (c *FSM) applyConfigEntryOperation(buf []byte, index uint64) interface{} { - req := structs.ConfigEntryRequest{} - if err := decodeConfigEntryOperationRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted config entry request") - return nil - } + req := structs.ConfigEntryRequest{ + Entry: &structs.ProxyConfigEntry{}, + } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } @@ -617,7 +594,7 @@ func (c *FSM) applyConfigEntryOperation(buf []byte, index uint64) interface{} { func (c *FSM) applyACLRoleSetOperation(buf []byte, index uint64) interface{} { var req structs.ACLRoleBatchSetRequest - if err := decodeACLRoleBatchSetRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "role"}, time.Now(), @@ -639,7 +616,7 @@ func (c *FSM) applyACLRoleDeleteOperation(buf []byte, index uint64) interface{} func (c *FSM) applyACLBindingRuleSetOperation(buf []byte, index uint64) interface{} { var req structs.ACLBindingRuleBatchSetRequest - if err := decodeACLBindingRuleBatchSetRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "bindingrule"}, time.Now(), @@ -661,7 +638,7 @@ func (c *FSM) applyACLBindingRuleDeleteOperation(buf []byte, index uint64) inter func (c *FSM) applyACLAuthMethodSetOperation(buf []byte, index uint64) interface{} { 
var req structs.ACLAuthMethodBatchSetRequest - if err := decodeACLAuthMethodBatchSetRequest(buf, &req); err != nil { + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "authmethod"}, time.Now(), @@ -672,11 +649,7 @@ func (c *FSM) applyACLAuthMethodSetOperation(buf []byte, index uint64) interface func (c *FSM) applyACLAuthMethodDeleteOperation(buf []byte, index uint64) interface{} { var req structs.ACLAuthMethodBatchDeleteRequest - if err := decodeACLAuthMethodBatchDeleteRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted acl auth method delete request") - return nil - } + if err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl", "authmethod"}, time.Now(), @@ -733,11 +706,7 @@ func (c *FSM) applySystemMetadataOperation(buf []byte, index uint64) interface{} func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} { var req pbpeering.PeeringWriteRequest - if err := decodePeeringWriteRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted peering write request") - return nil - } + if err := structs.DecodeProto(buf, &req); err != nil { panic(fmt.Errorf("failed to decode peering write request: %v", err)) } @@ -749,11 +718,7 @@ func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} { func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} { var req pbpeering.PeeringDeleteRequest - if err := decodePeeringDeleteRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted peering delete request") - return nil - } + if err := structs.DecodeProto(buf, &req); err != nil { panic(fmt.Errorf("failed to decode peering delete request: %v", err)) } @@ -793,11 +758,7 @@ func (c *FSM) applyPeeringTerminate(buf []byte, index uint64) interface{} { func (c *FSM) applyPeeringTrustBundleWrite(buf []byte, index uint64) interface{} { var req pbpeering.PeeringTrustBundleWriteRequest - if err := decodePeeringTrustBundleWriteRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted peering trust bundle write request") - return nil - } + if err := structs.DecodeProto(buf, &req); err != nil { panic(fmt.Errorf("failed to decode peering trust bundle write request: %v", err)) } @@ -809,11 +770,7 @@ func (c *FSM) applyPeeringTrustBundleWrite(buf []byte, index uint64) interface{} func (c *FSM) applyPeeringTrustBundleDelete(buf []byte, index uint64) interface{} { var req pbpeering.PeeringTrustBundleDeleteRequest - if err := decodePeeringTrustBundleDeleteRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted peering trust bundle delete request") - return nil - } + if err := structs.DecodeProto(buf, &req); err != nil { panic(fmt.Errorf("failed to decode peering trust bundle delete request: %v", err)) } @@ -833,11 +790,7 @@ func (f *FSM) applyResourceOperation(buf []byte, idx uint64) any { func (c *FSM) applyManualVirtualIPs(buf []byte, index uint64) interface{} { var req state.ServiceVirtualIP - if err := decodeServiceVirtualIPRequest(buf, &req); err != nil { - if errors.Is(err, ErrDroppingTenantedReq) { - c.logger.Warn("dropping tenanted virtual ip request") - return nil - } + if 
err := structs.Decode(buf, &req); err != nil { panic(fmt.Errorf("failed to decode request: %v", err)) } diff --git a/agent/consul/fsm/commands_ce_test.go b/agent/consul/fsm/commands_ce_test.go index c24dd2ea8f404..cea6f05f54f3f 100644 --- a/agent/consul/fsm/commands_ce_test.go +++ b/agent/consul/fsm/commands_ce_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm @@ -1539,6 +1539,7 @@ func TestFSM_Resources(t *testing.T) { }, Tenancy: &pbresource.Tenancy{ Partition: "default", + PeerName: "local", Namespace: "default", }, Name: "bar", diff --git a/agent/consul/fsm/decode_ce.go b/agent/consul/fsm/decode_ce.go deleted file mode 100644 index 2f4d3da3a26cb..0000000000000 --- a/agent/consul/fsm/decode_ce.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent -// +build !consulent - -package fsm - -import ( - "github.com/hashicorp/consul/agent/consul/state" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbpeering" -) - -func decodeRegistrationReq(buf []byte, req *structs.RegisterRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeRegistration(buf, req) -} - -func decodeDeregistrationReq(buf []byte, req *structs.DeregisterRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeDeregistration(buf, req) -} - -func decodeKVSRequest(buf []byte, req *structs.KVSRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeKVS(buf, req) -} - -func decodeSessionRequest(buf []byte, req *structs.SessionRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - - return decodeSession(buf, req) -} - -func decodePreparedQueryRequest(buf []byte, req *structs.PreparedQueryRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodePreparedQuery(buf, req) -} - -func decodeTxnRequest(buf []byte, req *structs.TxnRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeTxn(buf, req) -} - -func decodeACLTokenBatchSetRequest(buf []byte, req *structs.ACLTokenBatchSetRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeACLTokenBatchSet(buf, req) - -} - -func decodeACLPolicyBatchSetRequest(buf []byte, req *structs.ACLPolicyBatchSetRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeACLPolicyBatchSet(buf, req) - -} - -func decodeACLRoleBatchSetRequest(buf []byte, req *structs.ACLRoleBatchSetRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeACLRoleBatchSet(buf, req) -} - -func decodeACLBindingRuleBatchSetRequest(buf []byte, req *structs.ACLBindingRuleBatchSetRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeACLBindingRuleBatchSet(buf, req) -} - -func decodeACLAuthMethodBatchSetRequest(buf []byte, req *structs.ACLAuthMethodBatchSetRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeACLAuthMethodBatchSet(buf, req) -} - -func decodeACLAuthMethodBatchDeleteRequest(buf []byte, req *structs.ACLAuthMethodBatchDeleteRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - - return decodeACLAuthMethodBatchDelete(buf, req) -} - -func 
decodeServiceVirtualIPRequest(buf []byte, req *state.ServiceVirtualIP) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - return decodeServiceVirtualIP(buf, req) -} - -func decodePeeringWriteRequest(buf []byte, req *pbpeering.PeeringWriteRequest) error { - if !structs.CEDowngrade { - return structs.DecodeProto(buf, req) - } - return decodePeeringWrite(buf, req) -} - -func decodePeeringDeleteRequest(buf []byte, req *pbpeering.PeeringDeleteRequest) error { - if !structs.CEDowngrade { - return structs.DecodeProto(buf, req) - } - - return decodePeeringDelete(buf, req) -} - -func decodePeeringTrustBundleWriteRequest(buf []byte, req *pbpeering.PeeringTrustBundleWriteRequest) error { - if !structs.CEDowngrade { - return structs.DecodeProto(buf, req) - } - return decodePeeringTrustBundleWrite(buf, req) -} - -func decodePeeringTrustBundleDeleteRequest(buf []byte, req *pbpeering.PeeringTrustBundleDeleteRequest) error { - if !structs.CEDowngrade { - return structs.DecodeProto(buf, req) - } - return decodePeeringTrustBundleDelete(buf, req) -} - -func decodeConfigEntryOperationRequest(buf []byte, req *structs.ConfigEntryRequest) error { - if !structs.CEDowngrade { - return structs.Decode(buf, req) - } - - return decodeConfigEntryOperation(buf, req) -} diff --git a/agent/consul/fsm/decode_downgrade.go b/agent/consul/fsm/decode_downgrade.go deleted file mode 100644 index 7b8e2fce719f2..0000000000000 --- a/agent/consul/fsm/decode_downgrade.go +++ /dev/null @@ -1,1011 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package fsm - -import ( - "errors" - "fmt" - - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" - "github.com/hashicorp/consul/agent/consul/state" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/proto/private/pbpeering" -) - -func IsEnterpriseData(namespace, partition string) bool { - if (namespace != "" && namespace != "default") || (partition != "" && partition != "default") { - return true - } - return false -} - -var errIncompatibleTenantedData = errors.New("incompatible tenanted data") -var ErrDroppingTenantedReq = errors.New("dropping tenanted request") - -func decodeRegistration(buf []byte, req *structs.RegisterRequest) error { - type serviceRequest struct { - Namespace string - Partition string - *structs.NodeService - } - type checkRequest struct { - Namespace string - Partition string - *structs.HealthCheck - } - type NewRegReq struct { - - // shadows the Service field from the register request so that we can detect - // tenanted service registrations for untenanted nodes - Service *serviceRequest - - // shadows the Check field from the register request so that we can detect - // tenanted check registrations for untenanted nodes. - Check *checkRequest - - // shadows the Checks field for the same reasons as the singular version. 
- Checks []*checkRequest - - // Allows parsing the namespace of the whole request/node - Namespace string - - // Allows parsing the partition of the whole request/node - Partition string - *structs.RegisterRequest - } - var newReq NewRegReq - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - // checks if the node is tenanted - if IsEnterpriseData(newReq.Namespace, newReq.Partition) { - // the whole request can be dropped because the node itself is tenanted - return ErrDroppingTenantedReq - } - - // check if the service is tenanted - if newReq.Service != nil && !IsEnterpriseData(newReq.Service.Namespace, newReq.Service.Partition) { - // copy the shadow service pointer into the real RegisterRequest - newReq.RegisterRequest.Service = newReq.Service.NodeService - } - - // check if the singular check is tenanted - if newReq.Check != nil && !IsEnterpriseData(newReq.Check.Namespace, newReq.Check.Partition) { - newReq.RegisterRequest.Check = newReq.Check.HealthCheck - } - - // check for tenanted checks in the slice - for _, chk := range newReq.Checks { - if !IsEnterpriseData(chk.Namespace, chk.Partition) { - newReq.RegisterRequest.Checks = append(newReq.RegisterRequest.Checks, chk.HealthCheck) - } - } - // copy the data to the output request value - *req = *newReq.RegisterRequest - return nil -} - -func decodeDeregistration(buf []byte, req *structs.DeregisterRequest) error { - type NewDeRegReq struct { - Namespace string - - // Allows parsing the partition of the whole request/node - Partition string - - *structs.DeregisterRequest - - // Allows parsing the namespace of the whole request/node - - } - var newReq NewDeRegReq - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - // checks if the node is tenanted - if IsEnterpriseData(newReq.Namespace, newReq.Partition) { - // the whole request can be dropped because the node itself is tenanted - return ErrDroppingTenantedReq - } - - // copy the data to the output request value - *req = *newReq.DeregisterRequest - return nil -} - -func decodeKVS(buf []byte, req *structs.KVSRequest) error { - type dirEntryReq struct { - Namespace string - Partition string - *structs.DirEntry - } - type NewDirEntReq struct { - // shadows the DirEnt field from KVSRequest so that we can detect - // tenanted service registrations for untenanted nodes - DirEnt *dirEntryReq - *structs.KVSRequest - } - var newReq NewDirEntReq - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - if newReq.DirEnt != nil && IsEnterpriseData(newReq.DirEnt.Namespace, newReq.DirEnt.Partition) { - return ErrDroppingTenantedReq - } - - newReq.KVSRequest.DirEnt = *newReq.DirEnt.DirEntry - *req = *newReq.KVSRequest - return nil -} - -func decodeSession(buf []byte, req *structs.SessionRequest) error { - type sessionReq struct { - Namespace string - Partition string - *structs.Session - } - type NewSessionReq struct { - // shadows the Session field from SessionRequest so that we can detect - // tenanted service registrations for untenanted nodes - Session *sessionReq - *structs.SessionRequest - } - var newReq NewSessionReq - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - if newReq.Session != nil && IsEnterpriseData(newReq.Session.Namespace, newReq.Session.Partition) { - return ErrDroppingTenantedReq - - } - serviceChecks := newReq.Session.ServiceChecks - newReq.Session.ServiceChecks = nil - for _, sessionServiceCheck := range serviceChecks { - if !IsEnterpriseData(sessionServiceCheck.Namespace, "") { - 
newReq.Session.ServiceChecks = append(newReq.Session.ServiceChecks, sessionServiceCheck) - } - } - - newReq.SessionRequest.Session = *newReq.Session.Session - *req = *newReq.SessionRequest - return nil -} - -func decodePreparedQuery(buf []byte, req *structs.PreparedQueryRequest) error { - type serviceQuery struct { - Namespace string - Partition string - *structs.ServiceQuery - } - type prepQuery struct { - Service *serviceQuery - *structs.PreparedQuery - } - type NewPreparedQueryReq struct { - Query *prepQuery - *structs.PreparedQueryRequest - } - var newReq NewPreparedQueryReq - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - if newReq.Query != nil && newReq.Query.Service != nil && IsEnterpriseData(newReq.Query.Service.Namespace, newReq.Query.Service.Partition) { - return ErrDroppingTenantedReq - } - - newReq.Query.PreparedQuery.Service = *newReq.Query.Service.ServiceQuery - newReq.PreparedQueryRequest.Query = newReq.Query.PreparedQuery - *req = *newReq.PreparedQueryRequest - return nil -} - -func decodeTxn(buf []byte, req *structs.TxnRequest) error { - type dirEntryReq struct { - Namespace string - Partition string - *structs.DirEntry - } - type txnKVOp struct { - DirEnt *dirEntryReq - *structs.TxnKVOp - } - type nodeService struct { - Namespace string - Partition string - *structs.NodeService - } - type txnServiceOp struct { - Service *nodeService - *structs.TxnServiceOp - } - type healthCheck struct { - Namespace string - Partition string - *structs.HealthCheck - } - type txnCheckOp struct { - Check *healthCheck - *structs.TxnCheckOp - } - type session struct { - Namespace string - Partition string - *structs.Session - } - type txnSessionOp struct { - Session *session - *structs.TxnSessionOp - } - // Only one of the types should be filled out per entry. 
- type txnOp struct { - KV *txnKVOp - Service *txnServiceOp - Check *txnCheckOp - Session *txnSessionOp - *structs.TxnOp - } - type NewTxnRequest struct { - Ops []*txnOp - *structs.TxnRequest - } - var newReq NewTxnRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - for _, op := range newReq.Ops { - if op.KV != nil && op.KV.DirEnt != nil && !IsEnterpriseData(op.KV.DirEnt.Namespace, op.KV.DirEnt.Partition) { - txnOp := &structs.TxnOp{ - KV: &structs.TxnKVOp{ - Verb: op.KV.Verb, - DirEnt: *op.KV.DirEnt.DirEntry, - }, - } - newReq.TxnRequest.Ops = append(newReq.TxnRequest.Ops, txnOp) - continue - } - - if op.Service != nil && op.Service.Service != nil && !IsEnterpriseData(op.Service.Service.Namespace, op.Service.Service.Partition) { - txnOp := &structs.TxnOp{ - Service: &structs.TxnServiceOp{ - Verb: op.Service.Verb, - Node: op.Service.Node, - Service: *op.Service.Service.NodeService, - }, - } - newReq.TxnRequest.Ops = append(newReq.TxnRequest.Ops, txnOp) - continue - } - - if op.Check != nil && op.Check.Check != nil && !IsEnterpriseData(op.Check.Check.Namespace, op.Check.Check.Partition) { - txnOp := &structs.TxnOp{ - Check: &structs.TxnCheckOp{ - Verb: op.Check.Verb, - Check: *op.Check.Check.HealthCheck, - }, - } - newReq.TxnRequest.Ops = append(newReq.TxnRequest.Ops, txnOp) - continue - } - - if op.Session != nil && op.Session.Session != nil && !IsEnterpriseData(op.Session.Session.Namespace, op.Session.Session.Partition) { - txnOp := &structs.TxnOp{ - Session: &structs.TxnSessionOp{ - Verb: op.Session.Verb, - Session: *op.Session.Session.Session, - }, - } - txnOp.Session.Session.ServiceChecks = nil - for _, sessionServiceCheck := range op.Session.Session.ServiceChecks { - if !IsEnterpriseData(sessionServiceCheck.Namespace, "") { - txnOp.Session.Session.ServiceChecks = append(txnOp.Session.Session.ServiceChecks, sessionServiceCheck) - } - } - newReq.TxnRequest.Ops = append(newReq.TxnRequest.Ops, txnOp) - } - } - - *req = *newReq.TxnRequest - return nil -} - -func decodeACLTokenBatchSet(buf []byte, req *structs.ACLTokenBatchSetRequest) error { - type aclToken struct { - Namespace string - Partition string - *structs.ACLToken - } - type NewACLTokenBatchSetRequest struct { - Tokens []*aclToken - *structs.ACLTokenBatchSetRequest - } - var newReq NewACLTokenBatchSetRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - for _, token := range newReq.Tokens { - if !IsEnterpriseData(token.Namespace, token.Partition) { - newReq.ACLTokenBatchSetRequest.Tokens = append(newReq.ACLTokenBatchSetRequest.Tokens, token.ACLToken) - } - } - - *req = *newReq.ACLTokenBatchSetRequest - return nil - -} - -func decodeACLPolicyBatchSet(buf []byte, req *structs.ACLPolicyBatchSetRequest) error { - type aclPolicy struct { - Namespace string - Partition string - *structs.ACLPolicy - } - type NewACLPolicyBatchSetRequest struct { - Policies []*aclPolicy - *structs.ACLPolicyBatchSetRequest - } - var newReq NewACLPolicyBatchSetRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - if newReq.ACLPolicyBatchSetRequest == nil { - newReq.ACLPolicyBatchSetRequest = &structs.ACLPolicyBatchSetRequest{} - } - for _, policy := range newReq.Policies { - if !IsEnterpriseData(policy.Namespace, policy.Partition) { - newReq.ACLPolicyBatchSetRequest.Policies = append(newReq.ACLPolicyBatchSetRequest.Policies, policy.ACLPolicy) - } - } - - *req = *newReq.ACLPolicyBatchSetRequest - return nil - -} - -func decodeACLRoleBatchSet(buf []byte, req 
*structs.ACLRoleBatchSetRequest) error { - type aclRole struct { - Namespace string - Partition string - *structs.ACLRole - } - type NewACLRoleBatchSetRequest struct { - Roles []*aclRole - *structs.ACLRoleBatchSetRequest - } - var newReq NewACLRoleBatchSetRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - for _, role := range newReq.Roles { - if !IsEnterpriseData(role.Namespace, role.Partition) { - newReq.ACLRoleBatchSetRequest.Roles = append(newReq.ACLRoleBatchSetRequest.Roles, role.ACLRole) - } - } - - *req = *newReq.ACLRoleBatchSetRequest - return nil -} - -func decodeACLBindingRuleBatchSet(buf []byte, req *structs.ACLBindingRuleBatchSetRequest) error { - type aCLBindingRule struct { - Namespace string - Partition string - *structs.ACLBindingRule - } - type NewACLBindingRuleBatchSetRequest struct { - BindingRules []*aCLBindingRule - *structs.ACLBindingRuleBatchSetRequest - } - var newReq NewACLBindingRuleBatchSetRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - if newReq.ACLBindingRuleBatchSetRequest == nil { - newReq.ACLBindingRuleBatchSetRequest = &structs.ACLBindingRuleBatchSetRequest{} - } - for _, rule := range newReq.BindingRules { - if !IsEnterpriseData(rule.Namespace, rule.Partition) { - newReq.ACLBindingRuleBatchSetRequest.BindingRules = append(newReq.ACLBindingRuleBatchSetRequest.BindingRules, rule.ACLBindingRule) - } - } - - *req = *newReq.ACLBindingRuleBatchSetRequest - return nil -} - -func decodeACLAuthMethodBatchSet(buf []byte, req *structs.ACLAuthMethodBatchSetRequest) error { - type aCLAuthMethod struct { - Namespace string - Partition string - *structs.ACLAuthMethod - } - type NewACLAuthMethodBatchSetRequest struct { - AuthMethods []*aCLAuthMethod - *structs.ACLAuthMethodBatchSetRequest - } - var newReq NewACLAuthMethodBatchSetRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - if newReq.ACLAuthMethodBatchSetRequest == nil { - newReq.ACLAuthMethodBatchSetRequest = &structs.ACLAuthMethodBatchSetRequest{} - } - for _, authMethod := range newReq.AuthMethods { - if !IsEnterpriseData(authMethod.Namespace, authMethod.Partition) { - newReq.ACLAuthMethodBatchSetRequest.AuthMethods = append(newReq.ACLAuthMethodBatchSetRequest.AuthMethods, authMethod.ACLAuthMethod) - } - } - - *req = *newReq.ACLAuthMethodBatchSetRequest - return nil -} - -func decodeACLAuthMethodBatchDelete(buf []byte, req *structs.ACLAuthMethodBatchDeleteRequest) error { - type NewACLAuthMethodBatchDeleteRequest struct { - Namespace string - Partition string - *structs.ACLAuthMethodBatchDeleteRequest - } - - var newReq NewACLAuthMethodBatchDeleteRequest - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - if IsEnterpriseData(newReq.Namespace, newReq.Partition) { - return ErrDroppingTenantedReq - } - - *req = *newReq.ACLAuthMethodBatchDeleteRequest - return nil -} - -func decodeServiceVirtualIP(buf []byte, req *state.ServiceVirtualIP) error { - type serviceName struct { - Namespace string - Partition string - *structs.ServiceName - } - type peeredServiceName struct { - ServiceName *serviceName - *structs.PeeredServiceName - } - type NewServiceVirtualIP struct { - Service *peeredServiceName - *state.ServiceVirtualIP - } - var newReq NewServiceVirtualIP - if err := structs.Decode(buf, &newReq); err != nil { - return err - } - - if newReq.Service != nil && newReq.Service.ServiceName != nil && IsEnterpriseData(newReq.Service.ServiceName.Namespace, newReq.Service.ServiceName.Partition) { - return 
ErrDroppingTenantedReq - } - newReq.ServiceVirtualIP.Service.ServiceName = *newReq.Service.ServiceName.ServiceName - *req = *newReq.ServiceVirtualIP - return nil -} - -func decodePeeringWrite(buf []byte, req *pbpeering.PeeringWriteRequest) error { - if err := structs.DecodeProto(buf, req); err != nil { - return err - } - - if req.Peering != nil && IsEnterpriseData("", req.Peering.Partition) { - return ErrDroppingTenantedReq - } - - return nil -} - -func decodePeeringDelete(buf []byte, req *pbpeering.PeeringDeleteRequest) error { - if err := structs.DecodeProto(buf, req); err != nil { - return err - } - - if IsEnterpriseData("", req.Partition) { - return ErrDroppingTenantedReq - } - - return nil -} - -func decodePeeringTrustBundleWrite(buf []byte, req *pbpeering.PeeringTrustBundleWriteRequest) error { - if err := structs.DecodeProto(buf, req); err != nil { - return err - } - - if IsEnterpriseData("", req.PeeringTrustBundle.Partition) { - return ErrDroppingTenantedReq - } - - return nil -} - -func decodePeeringTrustBundleDelete(buf []byte, req *pbpeering.PeeringTrustBundleDeleteRequest) error { - if err := structs.DecodeProto(buf, req); err != nil { - return err - } - - if IsEnterpriseData("", req.Partition) { - return ErrDroppingTenantedReq - } - - return nil -} - -func decodeConfigEntryOperation(buf []byte, req *structs.ConfigEntryRequest) error { - - newReq := &ShadowConfigEntryRequest{ - ConfigEntryRequest: req, - } - if err := structs.Decode(buf, newReq); err != nil { - return err - } - shadowConfigEntry := newReq.ConfigEntryRequest.Entry.(ShadowConfigentry) - if err := shadowConfigEntry.CheckEnt(); err != nil { - return err - } - req.Entry = shadowConfigEntry.GetRealConfigEntry() - return nil -} - -type ShadowConfigEntryRequest struct { - *structs.ConfigEntryRequest -} - -func (c *ShadowConfigEntryRequest) UnmarshalBinary(data []byte) error { - // First decode the kind prefix - var kind string - dec := codec.NewDecoderBytes(data, structs.MsgpackHandle) - if err := dec.Decode(&kind); err != nil { - return err - } - - // Then decode the real thing with appropriate kind of ConfigEntry - entry, err := MakeShadowConfigEntry(kind, "") - if err != nil { - return err - } - c.Entry = entry - // Alias juggling to prevent infinite recursive calls back to this decode - // method. 
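For context: the UnmarshalBinary implementation here first decodes only the kind prefix, builds a kind-appropriate shadow entry, and then decodes the full payload through a type alias so the codec does not call back into UnmarshalBinary recursively. A minimal sketch of that alias trick, shown with encoding/json and simplified types rather than the real config entry machinery:

package main

import (
	"encoding/json"
	"fmt"
)

// ConfigEntryRequest is a simplified stand-in for the real request type.
type ConfigEntryRequest struct {
	Kind  string
	Entry map[string]any
}

// UnmarshalJSON peeks at Kind first (e.g. to pick a kind-specific Entry
// type), then decodes the whole payload through a type alias. The alias
// has no methods, so json.Unmarshal does not re-enter this method.
func (c *ConfigEntryRequest) UnmarshalJSON(data []byte) error {
	var peek struct{ Kind string }
	if err := json.Unmarshal(data, &peek); err != nil {
		return err
	}
	// Kind-specific setup would happen here, for example rejecting kinds
	// that only exist in the enterprise build.

	type alias ConfigEntryRequest
	return json.Unmarshal(data, (*alias)(c))
}

func main() {
	var req ConfigEntryRequest
	if err := json.Unmarshal([]byte(`{"Kind":"service-defaults","Entry":{"Name":"web"}}`), &req); err != nil {
		panic(err)
	}
	fmt.Println(req.Kind, req.Entry["Name"]) // service-defaults web
}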
- type Alias structs.ConfigEntryRequest - as := struct { - *Alias - }{ - Alias: (*Alias)(c.ConfigEntryRequest), - } - if err := dec.Decode(&as); err != nil { - return err - } - return nil -} -func MakeShadowConfigEntry(kind, name string) (structs.ConfigEntry, error) { - switch kind { - case structs.RateLimitIPConfig: - return nil, ErrDroppingTenantedReq - case structs.ServiceDefaults: - return &ShadowServiceConfigEntry{ServiceConfigEntry: &structs.ServiceConfigEntry{Name: name}}, nil - case structs.ProxyDefaults: - return &ShadowProxyConfigEntry{ProxyConfigEntry: &structs.ProxyConfigEntry{Name: name}}, nil - case structs.ServiceRouter: - return &ShadowServiceRouterConfigEntry{ServiceRouterConfigEntry: &structs.ServiceRouterConfigEntry{Name: name}}, nil - case structs.ServiceSplitter: - return &ShadowServiceSplitterConfigEntry{ServiceSplitterConfigEntry: &structs.ServiceSplitterConfigEntry{Name: name}}, nil - case structs.ServiceResolver: - return &ShadowServiceResolverConfigEntry{ServiceResolverConfigEntry: &structs.ServiceResolverConfigEntry{Name: name}}, nil - case structs.IngressGateway: - return &ShadowIngressGatewayConfigEntry{IngressGatewayConfigEntry: &structs.IngressGatewayConfigEntry{Name: name}}, nil - case structs.TerminatingGateway: - return &ShadowTerminatingGatewayConfigEntry{TerminatingGatewayConfigEntry: &structs.TerminatingGatewayConfigEntry{Name: name}}, nil - case structs.ServiceIntentions: - return &ShadowServiceIntentionsConfigEntry{ServiceIntentionsConfigEntry: &structs.ServiceIntentionsConfigEntry{Name: name}}, nil - case structs.MeshConfig: - return &ShadowMeshConfigEntry{MeshConfigEntry: &structs.MeshConfigEntry{}}, nil - case structs.ExportedServices: - return &ShadowExportedServicesConfigEntry{ExportedServicesConfigEntry: &structs.ExportedServicesConfigEntry{Name: name}}, nil - case structs.SamenessGroup: - return &ShadowSamenessGroupConfigEntry{SamenessGroupConfigEntry: &structs.SamenessGroupConfigEntry{Name: name}}, nil - case structs.APIGateway: - return &ShadowAPIGatewayConfigEntry{APIGatewayConfigEntry: &structs.APIGatewayConfigEntry{Name: name}}, nil - case structs.BoundAPIGateway: - return &ShadowBoundAPIGatewayConfigEntry{BoundAPIGatewayConfigEntry: &structs.BoundAPIGatewayConfigEntry{Name: name}}, nil - case structs.InlineCertificate: - return &ShadowInlineCertificateConfigEntry{InlineCertificateConfigEntry: &structs.InlineCertificateConfigEntry{Name: name}}, nil - case structs.HTTPRoute: - return &ShadowHTTPRouteConfigEntry{HTTPRouteConfigEntry: &structs.HTTPRouteConfigEntry{Name: name}}, nil - case structs.TCPRoute: - return &ShadowTCPRouteConfigEntry{TCPRouteConfigEntry: &structs.TCPRouteConfigEntry{Name: name}}, nil - case structs.JWTProvider: - return &ShadowJWTProviderConfigEntry{JWTProviderConfigEntry: &structs.JWTProviderConfigEntry{Name: name}}, nil - default: - return nil, fmt.Errorf("invalid config entry kind: %s", kind) - } -} - -type ShadowBase struct { - Namespace string - Partition string -} - -func (s ShadowBase) CheckEnt() error { - if IsEnterpriseData(s.Namespace, s.Partition) { - return ErrDroppingTenantedReq - } - return nil -} - -type ShadowConfigentry interface { - CheckEnt() error - GetRealConfigEntry() structs.ConfigEntry -} - -type ShadowProxyConfigEntry struct { - ShadowBase - *structs.ProxyConfigEntry -} - -func (s ShadowProxyConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.ProxyConfigEntry -} - -type ShadowServiceResolverConfigEntry struct { - ShadowBase - *structs.ServiceResolverConfigEntry -} - -func (s 
ShadowServiceResolverConfigEntry) CheckEnt() error { - if err := s.ShadowBase.CheckEnt(); err != nil { - return err - } - if s.ServiceResolverConfigEntry.Redirect != nil && (IsEnterpriseData(s.ServiceResolverConfigEntry.Redirect.Namespace, s.ServiceResolverConfigEntry.Redirect.Partition) || s.ServiceResolverConfigEntry.Redirect.SamenessGroup != "") { - return errIncompatibleTenantedData - } - for _, failover := range s.ServiceResolverConfigEntry.Failover { - if IsEnterpriseData(failover.Namespace, "") || failover.SamenessGroup != "" { - return errIncompatibleTenantedData - } - for _, target := range failover.Targets { - if IsEnterpriseData(target.Namespace, target.Partition) { - return errIncompatibleTenantedData - } - } - } - return nil -} - -func (s ShadowServiceResolverConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.ServiceResolverConfigEntry -} - -func (e *ShadowProxyConfigEntry) UnmarshalBinary(data []byte) error { - // The goal here is to add a post-decoding operation to - // decoding of a ProxyConfigEntry. The cleanest way I could - // find to do so was to implement the BinaryMarshaller interface - // and use a type alias to do the original round of decoding, - // followed by a MapWalk of the Config to coerce everything - // into JSON compatible types. - type Alias structs.ProxyConfigEntry - as := struct { - *ShadowBase - *Alias - }{ - ShadowBase: &e.ShadowBase, - Alias: (*Alias)(e.ProxyConfigEntry), - } - dec := codec.NewDecoderBytes(data, structs.MsgpackHandle) - if err := dec.Decode(&as); err != nil { - return err - } - config, err := lib.MapWalk(e.Config) - if err != nil { - return err - } - e.Config = config - return nil -} - -type ShadowUpstreamConfig struct { - ShadowBase - *structs.UpstreamConfig -} -type ShadowUpstreamConfiguration struct { - Overrides []*ShadowUpstreamConfig - *structs.UpstreamConfiguration -} -type ShadowServiceConfigEntry struct { - ShadowBase - UpstreamConfig *ShadowUpstreamConfiguration - *structs.ServiceConfigEntry -} - -func (s ShadowServiceConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - if s.UpstreamConfig != nil { - for _, override := range s.UpstreamConfig.Overrides { - if !IsEnterpriseData(override.Namespace, override.Partition) { - if s.ServiceConfigEntry.UpstreamConfig == nil { - s.ServiceConfigEntry.UpstreamConfig = &structs.UpstreamConfiguration{} - } - s.ServiceConfigEntry.UpstreamConfig.Overrides = append(s.ServiceConfigEntry.UpstreamConfig.Overrides, override.UpstreamConfig) - } - } - } - return s.ServiceConfigEntry -} - -type ShadowServiceRouterConfigEntry struct { - ShadowBase - *structs.ServiceRouterConfigEntry -} - -func (s ShadowServiceRouterConfigEntry) CheckEnt() error { - if err := s.ShadowBase.CheckEnt(); err != nil { - return err - } - for _, route := range s.ServiceRouterConfigEntry.Routes { - if IsEnterpriseData(route.Destination.Namespace, route.Destination.Partition) { - return errIncompatibleTenantedData - } - } - return nil -} - -func (s ShadowServiceRouterConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.ServiceRouterConfigEntry -} - -type ShadowServiceSplitterConfigEntry struct { - ShadowBase - *structs.ServiceSplitterConfigEntry -} - -func (s ShadowServiceSplitterConfigEntry) CheckEnt() error { - if err := s.ShadowBase.CheckEnt(); err != nil { - return err - } - for _, split := range s.ServiceSplitterConfigEntry.Splits { - if IsEnterpriseData(split.Namespace, split.Partition) { - return errIncompatibleTenantedData - } - } - return nil -} -func (s 
ShadowServiceSplitterConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.ServiceSplitterConfigEntry -} - -type ShadowIngressService struct { - ShadowBase - *structs.IngressService -} -type ShadowIngressListener struct { - Services []ShadowIngressService - *structs.IngressListener -} -type ShadowIngressGatewayConfigEntry struct { - ShadowBase - Listeners []ShadowIngressListener - *structs.IngressGatewayConfigEntry -} - -func (s ShadowIngressGatewayConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, listner := range s.Listeners { - for _, svc := range listner.Services { - if !IsEnterpriseData(svc.Namespace, svc.Partition) { - listner.IngressListener.Services = append(listner.IngressListener.Services, *svc.IngressService) - } - } - if len(listner.IngressListener.Services) == 0 { - continue - } - s.IngressGatewayConfigEntry.Listeners = append(s.IngressGatewayConfigEntry.Listeners, *listner.IngressListener) - } - return s.IngressGatewayConfigEntry -} - -type ShadowLinkedService struct { - ShadowBase - *structs.LinkedService -} - -type ShadowTerminatingGatewayConfigEntry struct { - ShadowBase - Services []ShadowLinkedService - *structs.TerminatingGatewayConfigEntry -} - -func (s ShadowTerminatingGatewayConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, svc := range s.Services { - if !IsEnterpriseData(svc.Namespace, svc.Partition) { - s.TerminatingGatewayConfigEntry.Services = append(s.TerminatingGatewayConfigEntry.Services, *svc.LinkedService) - } - } - return s.TerminatingGatewayConfigEntry -} - -type ShadowSourceIntention struct { - ShadowBase - *structs.SourceIntention -} -type ShadowServiceIntentionsConfigEntry struct { - ShadowBase - Sources []*ShadowSourceIntention - *structs.ServiceIntentionsConfigEntry -} - -func (s ShadowServiceIntentionsConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, source := range s.Sources { - if !IsEnterpriseData(source.Namespace, source.Partition) && source.SamenessGroup == "" { - s.ServiceIntentionsConfigEntry.Sources = append(s.ServiceIntentionsConfigEntry.Sources, source.SourceIntention) - } - } - return s.ServiceIntentionsConfigEntry -} - -type ShadowMeshConfigEntry struct { - ShadowBase - *structs.MeshConfigEntry -} - -func (s ShadowMeshConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.MeshConfigEntry -} - -type ShadowExportedServicesConfigEntry struct { - ShadowBase - *structs.ExportedServicesConfigEntry -} - -func (s ShadowExportedServicesConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - services := []structs.ExportedService{} - for _, svc := range s.ExportedServicesConfigEntry.Services { - if !IsEnterpriseData(svc.Namespace, "") { - consumers := []structs.ServiceConsumer{} - for _, consumer := range svc.Consumers { - if !IsEnterpriseData("", consumer.Partition) && consumer.SamenessGroup == "" { - consumers = append(consumers, consumer) - } - } - if len(consumers) == 0 { - continue - } - services = append(services, svc) - } - } - s.ExportedServicesConfigEntry.Services = services - return s.ExportedServicesConfigEntry -} - -type ShadowSamenessGroupConfigEntry struct { - ShadowBase - *structs.SamenessGroupConfigEntry -} - -func (s ShadowSamenessGroupConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.SamenessGroupConfigEntry -} - -type ShadowAPIGatewayConfigEntry struct { - ShadowBase - *structs.APIGatewayConfigEntry -} - -func (s ShadowAPIGatewayConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.APIGatewayConfigEntry -} - -type 
ShadowBoundAPIGatewayListener struct { - Routes []ShadowResourceReference - Certificates []ShadowResourceReference - *structs.BoundAPIGatewayListener -} -type ShadowBoundAPIGatewayConfigEntry struct { - ShadowBase - Listeners []ShadowBoundAPIGatewayListener - *structs.BoundAPIGatewayConfigEntry -} - -func (s ShadowBoundAPIGatewayConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, listner := range s.Listeners { - for _, route := range listner.Routes { - if !IsEnterpriseData(route.Namespace, route.Partition) { - listner.BoundAPIGatewayListener.Routes = append(listner.BoundAPIGatewayListener.Routes, *route.ResourceReference) - } - } - for _, cf := range listner.Certificates { - if !IsEnterpriseData(cf.Namespace, cf.Partition) { - listner.BoundAPIGatewayListener.Certificates = append(listner.BoundAPIGatewayListener.Certificates, *cf.ResourceReference) - } - } - s.BoundAPIGatewayConfigEntry.Listeners = append(s.BoundAPIGatewayConfigEntry.Listeners, *listner.BoundAPIGatewayListener) - } - return s.BoundAPIGatewayConfigEntry -} - -type ShadowInlineCertificateConfigEntry struct { - ShadowBase - *structs.InlineCertificateConfigEntry -} - -func (s ShadowInlineCertificateConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.InlineCertificateConfigEntry -} - -type ShadowHTTPService struct { - ShadowBase - *structs.HTTPService -} -type ShadowHTTPRouteRule struct { - Services []ShadowHTTPService - *structs.HTTPRouteRule -} -type ShadowResourceReference struct { - ShadowBase - *structs.ResourceReference -} -type ShadowHTTPRouteConfigEntry struct { - ShadowBase - Parents []ShadowResourceReference - Rules []ShadowHTTPRouteRule - *structs.HTTPRouteConfigEntry -} - -func (s ShadowHTTPRouteConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, parent := range s.Parents { - if !IsEnterpriseData(parent.Namespace, parent.Partition) { - s.HTTPRouteConfigEntry.Parents = append(s.HTTPRouteConfigEntry.Parents, *parent.ResourceReference) - } - } - for _, rule := range s.Rules { - for _, svc := range rule.Services { - if !IsEnterpriseData(svc.Namespace, svc.Partition) { - rule.HTTPRouteRule.Services = append(rule.HTTPRouteRule.Services, *svc.HTTPService) - } - } - s.HTTPRouteConfigEntry.Rules = append(s.HTTPRouteConfigEntry.Rules, *rule.HTTPRouteRule) - } - return s.HTTPRouteConfigEntry -} - -type ShadowTCPService struct { - ShadowBase - *structs.TCPService -} -type ShadowTCPRouteConfigEntry struct { - ShadowBase - Parents []ShadowResourceReference - Services []ShadowTCPService - *structs.TCPRouteConfigEntry -} - -func (s ShadowTCPRouteConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - for _, parent := range s.Parents { - if !IsEnterpriseData(parent.Namespace, parent.Partition) { - s.TCPRouteConfigEntry.Parents = append(s.TCPRouteConfigEntry.Parents, *parent.ResourceReference) - } - } - for _, svc := range s.Services { - if !IsEnterpriseData(svc.Namespace, svc.Partition) { - s.TCPRouteConfigEntry.Services = append(s.TCPRouteConfigEntry.Services, *svc.TCPService) - } - } - return s.TCPRouteConfigEntry -} - -type ShadowJWTProviderConfigEntry struct { - ShadowBase - *structs.JWTProviderConfigEntry -} - -func (s ShadowJWTProviderConfigEntry) GetRealConfigEntry() structs.ConfigEntry { - return s.JWTProviderConfigEntry -} diff --git a/agent/consul/fsm/fsm.go b/agent/consul/fsm/fsm.go index 7449858af2bf6..4357ad7c39e2b 100644 --- a/agent/consul/fsm/fsm.go +++ b/agent/consul/fsm/fsm.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm @@ -195,10 +195,6 @@ func (c *FSM) Apply(log *raft.Log) interface{} { c.logger.Warn("ignoring unknown message type, upgrade to newer version", "type", msgType) return nil } - if structs.CEDowngrade && msgType >= 64 { - c.logger.Warn("ignoring enterprise message, for downgrading to oss", "type", msgType) - return nil - } panic(fmt.Errorf("failed to apply request: %#v", buf)) } @@ -267,10 +263,7 @@ func (c *FSM) Restore(old io.ReadCloser) error { return err } default: - if structs.CEDowngrade && msg >= 64 { - c.logger.Warn("ignoring enterprise message , for downgrading to oss", "type", msg) - return nil - } else if msg >= 64 { + if msg >= 64 { return fmt.Errorf("msg type <%d> is a Consul Enterprise log entry. Consul CE cannot restore it", msg) } else { return fmt.Errorf("Unrecognized msg type %d", msg) @@ -303,7 +296,9 @@ func (c *FSM) Restore(old io.ReadCloser) error { // for new data. To prevent that inconsistency we refresh the topics while holding // the lock which ensures that any subscriptions to topics for FSM generated events if c.deps.Publisher != nil { - c.deps.Publisher.RefreshAllTopics() + c.deps.Publisher.RefreshTopic(state.EventTopicServiceHealth) + c.deps.Publisher.RefreshTopic(state.EventTopicServiceHealthConnect) + c.deps.Publisher.RefreshTopic(state.EventTopicCARoots) } c.stateLock.Unlock() @@ -435,11 +430,7 @@ func (c *FSM) registerStreamSnapshotHandlers() { err = c.deps.Publisher.RegisterHandler(state.EventTopicJWTProvider, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { return c.State().JWTProviderSnapshot(req, buf) }, true) - panicIfErr(err) - err = c.deps.Publisher.RegisterHandler(state.EventTopicExportedServices, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - return c.State().ExportedServicesSnapshot(req, buf) - }, true) panicIfErr(err) } diff --git a/agent/consul/fsm/fsm_test.go b/agent/consul/fsm/fsm_test.go index 839401014b52e..aa31615a5aa7a 100644 --- a/agent/consul/fsm/fsm_test.go +++ b/agent/consul/fsm/fsm_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm diff --git a/agent/consul/fsm/log_verification_chunking_shim.go b/agent/consul/fsm/log_verification_chunking_shim.go index 4f40c1820b74d..a74b92b5684d3 100644 --- a/agent/consul/fsm/log_verification_chunking_shim.go +++ b/agent/consul/fsm/log_verification_chunking_shim.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm diff --git a/agent/consul/fsm/snapshot.go b/agent/consul/fsm/snapshot.go index 24c8450cad2c1..a73b17daa4148 100644 --- a/agent/consul/fsm/snapshot.go +++ b/agent/consul/fsm/snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm diff --git a/agent/consul/fsm/snapshot_ce.go b/agent/consul/fsm/snapshot_ce.go index 0fcd38703661f..0ac39426a82b9 100644 --- a/agent/consul/fsm/snapshot_ce.go +++ b/agent/consul/fsm/snapshot_ce.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
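For context: the fsm.go hunks above drop the structs.CEDowngrade escape hatch, so raft log entries with a message type of 64 or higher (the range the code reserves for Consul Enterprise) are once again refused by Apply and Restore rather than silently skipped. A rough sketch of that dispatch-and-gate idea, with a hypothetical handler table rather than the real FSM wiring:

package main

import (
	"errors"
	"fmt"
)

// In this sketch a single leading byte identifies the request type;
// values >= 64 are treated as enterprise-only entries.
const enterpriseTypeFloor = 64

var errUnknownType = errors.New("unrecognized message type")

// applyLogEntry dispatches on the message type: known types are handled,
// enterprise-only types are rejected outright instead of being skipped.
func applyLogEntry(buf []byte, handlers map[byte]func([]byte) error) error {
	if len(buf) == 0 {
		return errors.New("empty log entry")
	}
	msgType := buf[0]
	if h, ok := handlers[msgType]; ok {
		return h(buf[1:])
	}
	if msgType >= enterpriseTypeFloor {
		return fmt.Errorf("msg type <%d> is an enterprise log entry and cannot be applied here", msgType)
	}
	return errUnknownType
}

func main() {
	handlers := map[byte]func([]byte) error{
		0: func([]byte) error { return nil }, // e.g. a register request
	}
	fmt.Println(applyLogEntry([]byte{0, 'x'}, handlers))  // <nil>
	fmt.Println(applyLogEntry([]byte{70, 'x'}, handlers)) // enterprise error
}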
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm diff --git a/agent/consul/fsm/snapshot_ce_test.go b/agent/consul/fsm/snapshot_ce_test.go index 70ab2000faa41..b08e75716f385 100644 --- a/agent/consul/fsm/snapshot_ce_test.go +++ b/agent/consul/fsm/snapshot_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package fsm diff --git a/agent/consul/fsm/snapshot_test.go b/agent/consul/fsm/snapshot_test.go index de4e64e344c37..21a7460410e1f 100644 --- a/agent/consul/fsm/snapshot_test.go +++ b/agent/consul/fsm/snapshot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package fsm @@ -566,6 +566,7 @@ func TestFSM_SnapshotRestore_CE(t *testing.T) { }, Tenancy: &pbresource.Tenancy{ Partition: "default", + PeerName: "local", Namespace: "default", }, Name: "bar", diff --git a/agent/consul/gateway_locator.go b/agent/consul/gateway_locator.go index 6503ca0c979d5..8f8ca29fbb469 100644 --- a/agent/consul/gateway_locator.go +++ b/agent/consul/gateway_locator.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/gateway_locator_test.go b/agent/consul/gateway_locator_test.go index a3e9da3d69007..f9b1daf26d825 100644 --- a/agent/consul/gateway_locator_test.go +++ b/agent/consul/gateway_locator_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/gateways/controller_gateways.go b/agent/consul/gateways/controller_gateways.go index fa830337fc47f..88d65bda4fb25 100644 --- a/agent/consul/gateways/controller_gateways.go +++ b/agent/consul/gateways/controller_gateways.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package gateways @@ -63,8 +63,6 @@ func (r *apiGatewayReconciler) Reconcile(ctx context.Context, req controller.Req return reconcileEntry(r.fsm.State(), r.logger, ctx, req, r.reconcileTCPRoute, r.cleanupRoute) case structs.InlineCertificate: return r.enqueueCertificateReferencedGateways(r.fsm.State(), ctx, req) - case structs.JWTProvider: - return r.enqueueJWTProviderReferencedGatewaysAndHTTPRoutes(r.fsm.State(), ctx, req) default: return nil } @@ -235,18 +233,7 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle logger.Warn("error retrieving bound api gateway", "error", err) return err } - - _, jwtProvidersConfigEntries, err := store.ConfigEntriesByKind(nil, structs.JWTProvider, wildcardMeta()) - if err != nil { - return err - } - - jwtProviders := make(map[string]*structs.JWTProviderConfigEntry, len(jwtProvidersConfigEntries)) - for _, provider := range jwtProvidersConfigEntries { - jwtProviders[provider.GetName()] = provider.(*structs.JWTProviderConfigEntry) - } - - meta := newGatewayMeta(gateway, bound, jwtProviders) + meta := newGatewayMeta(gateway, bound) certificateErrors, err := meta.checkCertificates(store) if err != nil { @@ -254,22 +241,16 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle return err } - jwtErrors, err := meta.checkJWTProviders() - if err != nil { - logger.Warn("error checking gateway JWT Providers", "error", err) - return err - } - - // set each listener as having resolved refs, then overwrite that status condition + // set each listener as having valid certs, then overwrite that status condition // if there are any certificate errors - meta.eachListener(func(_ *structs.APIGatewayListener, bound *structs.BoundAPIGatewayListener) error { + meta.eachListener(func(listener *structs.APIGatewayListener, bound *structs.BoundAPIGatewayListener) error { listenerRef := structs.ResourceReference{ Kind: structs.APIGateway, Name: meta.BoundGateway.Name, SectionName: bound.Name, EnterpriseMeta: meta.BoundGateway.EnterpriseMeta, } - updater.SetCondition(resolvedRefs(listenerRef)) + updater.SetCondition(validCertificate(listenerRef)) return nil }) @@ -277,19 +258,9 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle updater.SetCondition(invalidCertificate(ref, err)) } - for ref, err := range jwtErrors { - updater.SetCondition(invalidJWTProvider(ref, err)) - } - if len(certificateErrors) > 0 { updater.SetCondition(invalidCertificates()) - } - - if len(jwtErrors) > 0 { - updater.SetCondition(invalidJWTProviders()) - } - - if len(certificateErrors) == 0 && len(jwtErrors) == 0 { + } else { updater.SetCondition(gatewayAccepted()) } @@ -540,8 +511,7 @@ func NewAPIGatewayController(fsm *fsm.FSM, publisher state.EventPublisher, updat logger: logger, updater: updater, } - reconciler.controller = controller.New(publisher, reconciler). 
- WithLogger(logger.With("controller", "apiGatewayController")) + reconciler.controller = controller.New(publisher, reconciler) return reconciler.controller.Subscribe( &stream.SubscribeRequest{ Topic: state.EventTopicAPIGateway, @@ -566,11 +536,6 @@ func NewAPIGatewayController(fsm *fsm.FSM, publisher state.EventPublisher, updat &stream.SubscribeRequest{ Topic: state.EventTopicInlineCertificate, Subject: stream.SubjectWildcard, - }, - ).Subscribe( - &stream.SubscribeRequest{ - Topic: state.EventTopicJWTProvider, - Subject: stream.SubjectWildcard, }) } @@ -593,10 +558,6 @@ type gatewayMeta struct { // the map values are pointers so that we can update them directly // and have the changes propagate back to the container gateways. boundListeners map[string]*structs.BoundAPIGatewayListener - // jwtProviders holds the list of all the JWT Providers in a given partition - // we expect this list to be relatively small so we're okay with holding them all - // in memory - jwtProviders map[string]*structs.JWTProviderConfigEntry } // getAllGatewayMeta returns a pre-constructed list of all valid gateway and state @@ -612,16 +573,6 @@ func getAllGatewayMeta(store *state.Store) ([]*gatewayMeta, error) { return nil, err } - _, jwtProvidersConfigEntries, err := store.ConfigEntriesByKind(nil, structs.JWTProvider, wildcardMeta()) - if err != nil { - return nil, err - } - - jwtProviders := make(map[string]*structs.JWTProviderConfigEntry, len(jwtProvidersConfigEntries)) - for _, provider := range jwtProvidersConfigEntries { - jwtProviders[provider.GetName()] = provider.(*structs.JWTProviderConfigEntry) - } - meta := make([]*gatewayMeta, 0, len(boundGateways)) for _, b := range boundGateways { bound := b.(*structs.BoundAPIGatewayConfigEntry) @@ -632,7 +583,6 @@ func getAllGatewayMeta(store *state.Store) ([]*gatewayMeta, error) { meta = append(meta, (&gatewayMeta{ BoundGateway: bound, Gateway: gateway, - jwtProviders: jwtProviders, }).initialize()) break } @@ -691,17 +641,7 @@ func (g *gatewayMeta) updateRouteBinding(route structs.BoundRoute) (bool, []stru if err != nil { errors[ref] = err } - - isValidJWT := true - if httpRoute, ok := route.(*structs.HTTPRouteConfigEntry); ok { - var jwtErrors map[structs.ResourceReference]error - isValidJWT, jwtErrors = g.validateJWTForRoute(httpRoute) - for ref, err := range jwtErrors { - errors[ref] = err - } - } - - if didBind && isValidJWT { + if didBind { refDidBind = true listenerBound[listener.Name] = true } @@ -712,7 +652,6 @@ func (g *gatewayMeta) updateRouteBinding(route structs.BoundRoute) (bool, []stru if !refDidBind && errors[ref] == nil { errors[ref] = fmt.Errorf("failed to bind route %s to gateway %s with listener '%s'", route.GetName(), g.Gateway.Name, ref.SectionName) } - if refDidBind { for _, serviceName := range route.GetServiceNames() { g.BoundGateway.Services.AddService(structs.NewServiceName(serviceName.Name, &serviceName.EnterpriseMeta), routeRef) @@ -888,7 +827,7 @@ func (g *gatewayMeta) initialize() *gatewayMeta { } // newGatewayMeta returns an object that wraps the given APIGateway and BoundAPIGateway -func newGatewayMeta(gateway *structs.APIGatewayConfigEntry, bound structs.ConfigEntry, jwtProviders map[string]*structs.JWTProviderConfigEntry) *gatewayMeta { +func newGatewayMeta(gateway *structs.APIGatewayConfigEntry, bound structs.ConfigEntry) *gatewayMeta { var b *structs.BoundAPIGatewayConfigEntry if bound == nil { b = &structs.BoundAPIGatewayConfigEntry{ @@ -914,7 +853,6 @@ func newGatewayMeta(gateway *structs.APIGatewayConfigEntry, bound 
structs.Config return (&gatewayMeta{ BoundGateway: b, Gateway: gateway, - jwtProviders: jwtProviders, }).initialize() } @@ -932,7 +870,7 @@ func gatewayAccepted() structs.Condition { // invalidCertificate returns a condition used when a gateway references a // certificate that does not exist. It takes a ref used to scope the condition // to a given APIGateway listener. -func resolvedRefs(ref structs.ResourceReference) structs.Condition { +func validCertificate(ref structs.ResourceReference) structs.Condition { return structs.NewGatewayCondition( api.GatewayConditionResolvedRefs, api.ConditionStatusTrue, @@ -967,31 +905,6 @@ func invalidCertificates() structs.Condition { ) } -// invalidJWTProvider returns a condition used when a gateway listener references -// a JWTProvider that does not exist. It takes a ref used to scope the condition -// to a given APIGateway listener. -func invalidJWTProvider(ref structs.ResourceReference, err error) structs.Condition { - return structs.NewGatewayCondition( - api.GatewayConditionResolvedRefs, - api.ConditionStatusFalse, - api.GatewayListenerReasonInvalidJWTProviderRef, - err.Error(), - ref, - ) -} - -// invalidJWTProviders is used to set the overall condition of the APIGateway -// to invalid due to missing JWT providers that it references. -func invalidJWTProviders() structs.Condition { - return structs.NewGatewayCondition( - api.GatewayConditionAccepted, - api.ConditionStatusFalse, - api.GatewayReasonInvalidJWTProviders, - "gateway references invalid JWT Providers", - structs.ResourceReference{}, - ) -} - // gatewayListenerNoConflicts marks an APIGateway listener as having no conflicts within its // bound routes func gatewayListenerNoConflicts(ref structs.ResourceReference) structs.Condition { diff --git a/agent/consul/gateways/controller_gateways_ce.go b/agent/consul/gateways/controller_gateways_ce.go deleted file mode 100644 index 4e2aa9a523c84..0000000000000 --- a/agent/consul/gateways/controller_gateways_ce.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package gateways - -import ( - "context" - - "github.com/hashicorp/consul/agent/consul/controller" - "github.com/hashicorp/consul/agent/consul/state" - "github.com/hashicorp/consul/agent/structs" -) - -func (r *apiGatewayReconciler) enqueueJWTProviderReferencedGatewaysAndHTTPRoutes(_ *state.Store, _ context.Context, _ controller.Request) error { - return nil -} - -func (m *gatewayMeta) checkJWTProviders() (map[structs.ResourceReference]error, error) { - return nil, nil -} - -func (m *gatewayMeta) validateJWTForRoute(_ *structs.HTTPRouteConfigEntry) (bool, map[structs.ResourceReference]error) { - return true, nil -} diff --git a/agent/consul/gateways/controller_gateways_test.go b/agent/consul/gateways/controller_gateways_test.go index 666f9e002cf47..94fc46d7d490c 100644 --- a/agent/consul/gateways/controller_gateways_test.go +++ b/agent/consul/gateways/controller_gateways_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package gateways @@ -2032,7 +2032,7 @@ func TestAPIGatewayController(t *testing.T) { EnterpriseMeta: *defaultMeta, SectionName: "listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", EnterpriseMeta: *defaultMeta, @@ -2130,7 +2130,7 @@ func TestAPIGatewayController(t *testing.T) { EnterpriseMeta: *defaultMeta, SectionName: "listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", EnterpriseMeta: *defaultMeta, @@ -2269,7 +2269,7 @@ func TestAPIGatewayController(t *testing.T) { EnterpriseMeta: *defaultMeta, SectionName: "listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", EnterpriseMeta: *defaultMeta, @@ -2428,7 +2428,7 @@ func TestAPIGatewayController(t *testing.T) { EnterpriseMeta: *defaultMeta, SectionName: "listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", EnterpriseMeta: *defaultMeta, @@ -2579,7 +2579,7 @@ func TestAPIGatewayController(t *testing.T) { EnterpriseMeta: *defaultMeta, SectionName: "listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", EnterpriseMeta: *defaultMeta, @@ -2757,12 +2757,12 @@ func TestAPIGatewayController(t *testing.T) { Name: "gateway", SectionName: "tcp-listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "http-listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "tcp-listener", @@ -3121,7 +3121,7 @@ func TestAPIGatewayController(t *testing.T) { Name: "gateway", SectionName: "http-listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "http-listener", @@ -3476,7 +3476,7 @@ func TestAPIGatewayController(t *testing.T) { Name: "gateway", SectionName: "http-listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "http-listener", @@ -3574,12 +3574,12 @@ func TestAPIGatewayController(t *testing.T) { }, Status: structs.Status{ Conditions: []structs.Condition{ - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "listener-1", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "listener-2", @@ -3800,7 +3800,7 @@ func TestAPIGatewayController(t *testing.T) { Name: "gateway", SectionName: "invalid-listener", }, errors.New("certificate \"missing certificate\" not found")), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: structs.APIGateway, Name: "gateway", SectionName: "valid-listener", @@ -3907,7 +3907,7 @@ func TestAPIGatewayController(t *testing.T) { Name: "gateway", SectionName: "http-listener", }), - resolvedRefs(structs.ResourceReference{ + validCertificate(structs.ResourceReference{ Kind: 
structs.APIGateway, Name: "gateway", SectionName: "http-listener", diff --git a/agent/consul/grpc_integration_test.go b/agent/consul/grpc_integration_test.go index 6ae49e09fa3aa..678403a450409 100644 --- a/agent/consul/grpc_integration_test.go +++ b/agent/consul/grpc_integration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index d26bbcd3b6f2f..c1286cce172fd 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index b47159c229424..21a83ea90db2e 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/helper_test.go b/agent/consul/helper_test.go index d21523b8fecff..0619004c546e6 100644 --- a/agent/consul/helper_test.go +++ b/agent/consul/helper_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index df05428145976..b00ebfbb46f0d 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -752,7 +752,19 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In } } - defaultAllow := DefaultIntentionAllow(authz, s.srv.config.DefaultIntentionPolicy) + // Note: the default intention policy is like an intention with a + // wildcarded destination in that it is limited to L4-only. + + // No match, we need to determine the default behavior. We do this by + // fetching the default intention behavior from the resolved authorizer. + // The default behavior if ACLs are disabled is to allow connections + // to mimic the behavior of Consul itself: everything is allowed if + // ACLs are disabled. + // + // NOTE(mitchellh): This is the same behavior as the agent authorize + // endpoint. If this behavior is incorrect, we should also change it there + // which is much more important. + defaultDecision := authz.IntentionDefaultAllow(nil) store := s.srv.fsm.State() @@ -772,7 +784,7 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In Partition: query.DestinationPartition, Intentions: intentions, MatchType: structs.IntentionMatchDestination, - DefaultAllow: defaultAllow, + DefaultDecision: defaultDecision, AllowPermissions: false, } decision, err := store.IntentionDecision(opts) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 08480501d7bf0..1ecd4a7e26533 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -550,8 +550,8 @@ func TestIntentionApply_WithoutIDs(t *testing.T) { }, }, RaftIndex: entry.RaftIndex, + Hash: entry.GetHash(), } - entry.Hash = 0 require.Equal(t, expect, entry) } @@ -689,8 +689,8 @@ func TestIntentionApply_WithoutIDs(t *testing.T) { }, }, RaftIndex: entry.RaftIndex, + Hash: entry.GetHash(), } - entry.Hash = 0 require.Equal(t, expect, entry) } @@ -758,8 +758,9 @@ func TestIntentionApply_WithoutIDs(t *testing.T) { }, }, RaftIndex: entry.RaftIndex, + Hash: entry.GetHash(), } - entry.Hash = 0 + require.Equal(t, expect, entry) } @@ -1960,89 +1961,106 @@ func TestIntentionMatch_acl(t *testing.T) { } } -func TestIntentionCheck(t *testing.T) { +// Test the Check method defaults to allow with no ACL set. +func TestIntentionCheck_defaultNoACL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } t.Parallel() - type testcase struct { - aclsEnabled bool - defaultACL string - defaultIxn string - expectAllowed bool - } - tcs := map[string]testcase{ - "acls disabled, no default intention policy": { - aclsEnabled: false, - expectAllowed: true, - }, - "acls disabled, default intention allow": { - aclsEnabled: false, - defaultIxn: "allow", - expectAllowed: true, - }, - "acls disabled, default intention deny": { - aclsEnabled: false, - defaultIxn: "deny", - expectAllowed: false, - }, - "acls deny, no default intention policy": { - aclsEnabled: true, - defaultACL: "deny", - expectAllowed: false, - }, - "acls allow, no default intention policy": { - aclsEnabled: true, - defaultACL: "allow", - expectAllowed: true, - }, - "acls deny, default intention allow": { - aclsEnabled: true, - defaultACL: "deny", - defaultIxn: "allow", - expectAllowed: true, + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + waitForLeaderEstablishment(t, s1) + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Check: &structs.IntentionQueryCheck{ + SourceName: "bar", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, }, - "acls allow, default intention deny": { - aclsEnabled: true, - defaultACL: "allow", - defaultIxn: "deny", - expectAllowed: false, + } + var resp structs.IntentionQueryCheckResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) + require.True(t, resp.Allowed) +} + +// Test the Check method defaults to deny with allowlist ACLs. 
+func TestIntentionCheck_defaultACLDeny(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + waitForLeaderEstablishment(t, s1) + + // Check + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Check: &structs.IntentionQueryCheck{ + SourceName: "bar", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, }, } - for name, tc := range tcs { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - _, s1 := testServerWithConfig(t, func(c *Config) { - if tc.aclsEnabled { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = tc.defaultACL - } - c.DefaultIntentionPolicy = tc.defaultIxn - }) - codec := rpcClient(t, s1) + req.Token = "root" + var resp structs.IntentionQueryCheckResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) + require.False(t, resp.Allowed) +} - waitForLeaderEstablishment(t, s1) +// Test the Check method defaults to deny with denylist ACLs. +func TestIntentionCheck_defaultACLAllow(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } - req := &structs.IntentionQueryRequest{ - Datacenter: "dc1", - Check: &structs.IntentionQueryCheck{ - SourceName: "bar", - DestinationName: "qux", - SourceType: structs.IntentionSourceConsul, - }, - } - req.Token = "root" + t.Parallel() - var resp structs.IntentionQueryCheckResponse - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) - require.Equal(t, tc.expectAllowed, resp.Allowed) - }) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + waitForLeaderEstablishment(t, s1) + + // Check + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Check: &structs.IntentionQueryCheck{ + SourceName: "bar", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, + }, } + req.Token = "root" + var resp structs.IntentionQueryCheckResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) + require.True(t, resp.Allowed) } // Test the Check method requires service:read permission. diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index af27842d20457..bf8e200fce6f0 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
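For context: the Intention.Check hunk and the restored TestIntentionCheck tests above revert the default intention decision to authz.IntentionDefaultAllow(nil), meaning the fallback follows the ACL default policy and allows everything when ACLs are disabled; the three tests cover exactly those cases. A small sketch of that fallback logic as an illustration, not the actual authorizer code:

package main

import "fmt"

// defaultIntentionAllow mirrors the idea behind the restored fallback:
// when no intention matches, defer to the ACL default policy, and allow
// everything when ACLs are disabled.
func defaultIntentionAllow(aclsEnabled bool, aclDefaultPolicy string) bool {
	if !aclsEnabled {
		return true
	}
	return aclDefaultPolicy == "allow"
}

func main() {
	fmt.Println(defaultIntentionAllow(false, ""))     // true  (ACLs disabled)
	fmt.Println(defaultIntentionAllow(true, "deny"))  // false (allowlist mode)
	fmt.Println(defaultIntentionAllow(true, "allow")) // true  (denylist mode)
}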
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -306,7 +306,7 @@ func (m *Internal) ServiceTopology(args *structs.ServiceSpecificRequest, reply * &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - defaultAllow := DefaultIntentionAllow(authz, m.srv.config.DefaultIntentionPolicy) + defaultAllow := authz.IntentionDefaultAllow(nil) index, topology, err := state.ServiceTopology(ws, args.Datacenter, args.ServiceName, args.ServiceKind, defaultAllow, &args.EnterpriseMeta) if err != nil { @@ -375,10 +375,10 @@ func (m *Internal) internalUpstreams(args *structs.ServiceSpecificRequest, reply &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - defaultAllow := DefaultIntentionAllow(authz, m.srv.config.DefaultIntentionPolicy) + defaultDecision := authz.IntentionDefaultAllow(nil) sn := structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta) - index, services, err := state.IntentionTopology(ws, sn, false, defaultAllow, intentionTarget) + index, services, err := state.IntentionTopology(ws, sn, false, defaultDecision, intentionTarget) if err != nil { return err } diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index a7f132810145d..dac01e24004ed 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -2384,12 +2384,14 @@ func TestInternal_ServiceTopology_ACL(t *testing.T) { } t.Parallel() - _, s1 := testServerWithConfig(t, func(c *Config) { + dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLInitialManagementToken = TestDefaultInitialManagementToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -2471,40 +2473,6 @@ service "web" { policy = "read" } }) } -// Tests that default intention deny policy overrides the ACL allow policy. -// More comprehensive tests are done at the state store so this is minimal -// coverage to be confident that the override happens. 
-func TestInternal_ServiceTopology_DefaultIntentionPolicy(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - _, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = TestDefaultInitialManagementToken - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.DefaultIntentionPolicy = "deny" - }) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - codec := rpcClient(t, s1) - - registerTestTopologyEntries(t, codec, TestDefaultInitialManagementToken) - - args := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: "redis", - QueryOptions: structs.QueryOptions{Token: TestDefaultInitialManagementToken}, - } - var out structs.IndexedServiceTopology - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceTopology", &args, &out)) - - webSN := structs.NewServiceName("web", acl.DefaultEnterpriseMeta()) - require.False(t, out.ServiceTopology.DownstreamDecisions[webSN.String()].DefaultAllow) -} - func TestInternal_IntentionUpstreams(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/consul/issue_test.go b/agent/consul/issue_test.go index 17624400fe3f1..14928a9db99d1 100644 --- a/agent/consul/issue_test.go +++ b/agent/consul/issue_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/kvs_endpoint.go b/agent/consul/kvs_endpoint.go index 65dc2cd56d40f..183f95f7f8bf0 100644 --- a/agent/consul/kvs_endpoint.go +++ b/agent/consul/kvs_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/kvs_endpoint_test.go b/agent/consul/kvs_endpoint_test.go index dc4272c4bd55d..ca9960f4b6e4e 100644 --- a/agent/consul/kvs_endpoint_test.go +++ b/agent/consul/kvs_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 53312c7fe53c8..17408d4ef4419 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -1,13 +1,13 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul import ( "context" - "errors" "fmt" "net" + "reflect" "strconv" "strings" "sync" @@ -16,29 +16,21 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" - "github.com/google/go-cmp/cmp" - "github.com/oklog/ulid/v2" - "golang.org/x/time/rate" - "google.golang.org/protobuf/types/known/anypb" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" + "golang.org/x/time/rate" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs/aclfilter" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" + "github.com/hashicorp/consul/types" ) var LeaderSummaries = []prometheus.SummaryDefinition{ @@ -349,18 +341,6 @@ func (s *Server) establishLeadership(ctx context.Context) error { s.startLogVerification(ctx) } - if s.useV2Tenancy { - if err := s.initTenancy(ctx, s.storageBackend); err != nil { - return err - } - } - - if s.useV2Resources { - if err := s.initConsulService(ctx, pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan)); err != nil { - return err - } - } - if s.config.Reporting.License.Enabled && s.reportingManager != nil { s.reportingManager.StartReportingAgent() } @@ -586,87 +566,6 @@ func (s *Server) initializeManagementToken(name, secretID string) error { return nil } -func (s *Server) upsertManagementToken(name, secretID string) error { - state := s.fsm.State() - if _, err := uuid.ParseUUID(secretID); err != nil { - s.logger.Warn("Configuring a non-UUID management token is deprecated") - } - - _, token, err := state.ACLTokenGetBySecret(nil, secretID, nil) - if err != nil { - return fmt.Errorf("failed to get %s: %v", name, err) - } - - if token != nil { - return nil - } - - accessor, err := lib.GenerateUUID(s.checkTokenUUID) - if err != nil { - return fmt.Errorf("failed to generate the accessor ID for %s: %v", name, err) - } - - newToken := structs.ACLToken{ - AccessorID: accessor, - SecretID: secretID, - Description: name, - Policies: []structs.ACLTokenPolicyLink{ - { - ID: structs.ACLPolicyGlobalManagementID, - }, - }, - CreateTime: time.Now(), - Local: false, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - } - - newToken.SetHash(true) - - req := structs.ACLTokenBatchSetRequest{ - Tokens: structs.ACLTokens{&newToken}, - CAS: false, - } - if _, err := s.raftApply(structs.ACLTokenSetRequestType, &req); err != nil { - return fmt.Errorf("failed to create %s: %v", name, err) - } - - s.logger.Info("Created ACL token", "description", name) - - return nil -} - -func (s *Server) deleteManagementToken(secretId string) error { - state := s.fsm.State() - - // Fetch the token to get its accessor ID and to verify that it's a management token - _, token, err := state.ACLTokenGetBySecret(nil, secretId, nil) - if err != nil { - return fmt.Errorf("failed to get management token: %v", err) - } - - if token == nil { - // token is already deleted - return nil - } - - accessorID := token.AccessorID - if 
len(token.Policies) != 1 && token.Policies[0].ID != structs.ACLPolicyGlobalManagementID { - return fmt.Errorf("failed to delete management token: not a management token") - } - - // Delete the token - req := structs.ACLTokenBatchDeleteRequest{ - TokenIDs: []string{accessorID}, - } - if _, err := s.raftApply(structs.ACLTokenDeleteRequestType, &req); err != nil { - return fmt.Errorf("failed to delete management token: %v", err) - } - - s.logger.Info("deleted ACL token", "description", token.Description) - - return nil -} - func (s *Server) insertAnonymousToken() error { state := s.fsm.State() _, token, err := state.ACLTokenGetBySecret(nil, anonymousToken, nil) @@ -1045,21 +944,13 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent } // Attempt to reap this member - if err := s.registrator.HandleReapMember(member, nodeEntMeta, s.removeConsulServer); err != nil { + if err := s.handleReapMember(member, nodeEntMeta); err != nil { return err } } return nil } -// ConsulRegistrator is an interface that manages the catalog registration lifecycle of Consul servers from serf events. -type ConsulRegistrator interface { - HandleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, joinServer func(m serf.Member, parts *metadata.Server) error) error - HandleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error - HandleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error - HandleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error -} - // reconcileMember is used to do an async reconcile of a single // serf member func (s *Server) reconcileMember(member serf.Member) error { @@ -1078,13 +969,13 @@ func (s *Server) reconcileMember(member serf.Member) error { var err error switch member.Status { case serf.StatusAlive: - err = s.registrator.HandleAliveMember(member, nodeEntMeta, s.joinConsulServer) + err = s.handleAliveMember(member, nodeEntMeta) case serf.StatusFailed: - err = s.registrator.HandleFailedMember(member, nodeEntMeta) + err = s.handleFailedMember(member, nodeEntMeta) case serf.StatusLeft: - err = s.registrator.HandleLeftMember(member, nodeEntMeta, s.removeConsulServer) + err = s.handleLeftMember(member, nodeEntMeta) case StatusReap: - err = s.registrator.HandleReapMember(member, nodeEntMeta, s.removeConsulServer) + err = s.handleReapMember(member, nodeEntMeta) } if err != nil { s.logger.Error("failed to reconcile member", @@ -1115,6 +1006,254 @@ func (s *Server) shouldHandleMember(member serf.Member) bool { return false } +// handleAliveMember is used to ensure the node +// is registered, with a passing health check. 
+func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + if nodeEntMeta == nil { + nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + + // Register consul service if a server + var service *structs.NodeService + if valid, parts := metadata.IsConsulServer(member); valid { + service = &structs.NodeService{ + ID: structs.ConsulServiceID, + Service: structs.ConsulServiceName, + Port: parts.Port, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + EnterpriseMeta: *nodeEntMeta, + Meta: map[string]string{ + // DEPRECATED - remove nonvoter in favor of read_replica in a future version of consul + "non_voter": strconv.FormatBool(member.Tags["nonvoter"] == "1"), + "read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"), + "raft_version": strconv.Itoa(parts.RaftVersion), + "serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10), + "serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10), + "serf_protocol_max": strconv.FormatUint(uint64(member.ProtocolMax), 10), + "version": parts.Build.String(), + }, + } + + if parts.ExternalGRPCPort > 0 { + service.Meta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort) + } + if parts.ExternalGRPCTLSPort > 0 { + service.Meta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort) + } + + // Attempt to join the consul server + if err := s.joinConsulServer(member, parts); err != nil { + return err + } + } + + // Check if the node exists + state := s.fsm.State() + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + if node != nil && node.Address == member.Addr.String() { + // Check if the associated service is available + if service != nil { + match := false + _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + if services != nil { + for id, serv := range services.Services { + if id == service.ID { + // If metadata are different, be sure to update it + match = reflect.DeepEqual(serv.Meta, service.Meta) + } + } + } + if !match { + goto AFTER_CHECK + } + } + + // Check if the serfCheck is in the passing state + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + for _, check := range checks { + if check.CheckID == structs.SerfCheckID && check.Status == api.HealthPassing { + return nil + } + } + } +AFTER_CHECK: + s.logger.Info("member joined, marking health alive", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // Get consul version from serf member + // add this as node meta in catalog register request + buildVersion, err := metadata.Build(&member) + if err != nil { + return err + } + + // Register with the catalog. 
+ req := structs.RegisterRequest{ + Datacenter: s.config.Datacenter, + Node: member.Name, + ID: types.NodeID(member.Tags["id"]), + Address: member.Addr.String(), + Service: service, + Check: &structs.HealthCheck{ + Node: member.Name, + CheckID: structs.SerfCheckID, + Name: structs.SerfCheckName, + Status: api.HealthPassing, + Output: structs.SerfCheckAliveOutput, + }, + EnterpriseMeta: *nodeEntMeta, + NodeMeta: map[string]string{ + structs.MetaConsulVersion: buildVersion.String(), + }, + } + if node != nil { + req.TaggedAddresses = node.TaggedAddresses + req.NodeMeta = node.Meta + } + + _, err = s.raftApply(structs.RegisterRequestType, &req) + return err +} + +// handleFailedMember is used to mark the node's status +// as being critical, along with all checks as unknown. +func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + if nodeEntMeta == nil { + nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + + // Check if the node exists + state := s.fsm.State() + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + + if node == nil { + s.logger.Info("ignoring failed event for member because it does not exist in the catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + + if node.Address == member.Addr.String() { + // Check if the serfCheck is in the critical state + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + for _, check := range checks { + if check.CheckID == structs.SerfCheckID && check.Status == api.HealthCritical { + return nil + } + } + } + s.logger.Info("member failed, marking health critical", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // Register with the catalog + req := structs.RegisterRequest{ + Datacenter: s.config.Datacenter, + Node: member.Name, + EnterpriseMeta: *nodeEntMeta, + ID: types.NodeID(member.Tags["id"]), + Address: member.Addr.String(), + Check: &structs.HealthCheck{ + Node: member.Name, + CheckID: structs.SerfCheckID, + Name: structs.SerfCheckName, + Status: api.HealthCritical, + Output: structs.SerfCheckFailedOutput, + }, + + // If there's existing information about the node, do not + // clobber it. + SkipNodeUpdate: true, + } + _, err = s.raftApply(structs.RegisterRequestType, &req) + return err +} + +// handleLeftMember is used to handle members that gracefully +// left. They are deregistered if necessary. +func (s *Server) handleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + return s.handleDeregisterMember("left", member, nodeEntMeta) +} + +// handleReapMember is used to handle members that have been +// reaped after a prolonged failure. They are deregistered. +func (s *Server) handleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + return s.handleDeregisterMember("reaped", member, nodeEntMeta) +} + +// handleDeregisterMember is used to deregister a member of a given reason +func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + if nodeEntMeta == nil { + nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + + // Do not deregister ourself. This can only happen if the current leader + // is leaving. Instead, we should allow a follower to take-over and + // deregister us later. 
+ // + // TODO(partitions): check partitions here too? server names should be unique in general though + if strings.EqualFold(member.Name, s.config.NodeName) { + s.logger.Warn("deregistering self should be done by follower", + "name", s.config.NodeName, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + + // Remove from Raft peers if this was a server + if valid, _ := metadata.IsConsulServer(member); valid { + if err := s.removeConsulServer(member); err != nil { + return err + } + } + + // Check if the node does not exist + state := s.fsm.State() + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) + if err != nil { + return err + } + if node == nil { + return nil + } + + // Deregister the node + s.logger.Info("deregistering member", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + "reason", reason, + ) + req := structs.DeregisterRequest{ + Datacenter: s.config.Datacenter, + Node: member.Name, + EnterpriseMeta: *nodeEntMeta, + } + _, err = s.raftApply(structs.DeregisterRequestType, &req) + return err +} + // joinConsulServer is used to try to join another consul server func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error { // Check for possibility of multiple bootstrap nodes @@ -1310,121 +1449,3 @@ func (s *serversIntentionsAsConfigEntriesInfo) update(srv *metadata.Server) bool // prevent continuing server evaluation return false } - -func (s *Server) initConsulService(ctx context.Context, client pbresource.ResourceServiceClient) error { - service := &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{ - Prefixes: []string{consulWorkloadPrefix}, - }, - Ports: []*pbcatalog.ServicePort{ - { - TargetPort: consulPortNameServer, - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - // No virtual port defined for now, as we assume this is generally for Service Discovery - }, - }, - } - - serviceData, err := anypb.New(service) - if err != nil { - return fmt.Errorf("could not convert Service to `any` message: %w", err) - } - - // create a default namespace in default partition - serviceID := &pbresource.ID{ - Type: pbcatalog.ServiceType, - Name: structs.ConsulServiceName, - Tenancy: resource.DefaultNamespacedTenancy(), - } - - serviceResource := &pbresource.Resource{ - Id: serviceID, - Data: serviceData, - } - - res, err := client.Read(ctx, &pbresource.ReadRequest{Id: serviceID}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("failed to read the %s Service: %w", structs.ConsulServiceName, err) - } - - if err == nil { - existingService := res.GetResource() - s.logger.Debug("existingService consul Service found") - - // If the Service is identical, we're done. - if cmp.Equal(serviceResource, existingService, resourceCmpOptions...) { - s.logger.Debug("no updates to perform on consul Service") - return nil - } - - // If the existing Service is different, add the Version to the patch for CAS write. 
- serviceResource.Id = existingService.Id - serviceResource.Version = existingService.Version - } - - _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: serviceResource}) - if err != nil { - return fmt.Errorf("failed to create the %s service: %w", structs.ConsulServiceName, err) - } - - s.logger.Info("Created consul Service in catalog") - return nil -} - -func (s *Server) initTenancy(ctx context.Context, b storage.Backend) error { - // we write these defaults directly to the storage backend - // without going through the resource service since tenancy - // validation hooks block writes to the default namespace - // and partition. - if err := s.createDefaultPartition(ctx, b); err != nil { - return err - } - - if err := s.createDefaultNamespace(ctx, b); err != nil { - return err - } - return nil -} - -func (s *Server) createDefaultNamespace(ctx context.Context, b storage.Backend) error { - readID := &pbresource.ID{ - Type: pbtenancy.NamespaceType, - Name: resource.DefaultNamespaceName, - Tenancy: resource.DefaultPartitionedTenancy(), - } - - read, err := b.Read(ctx, storage.StrongConsistency, readID) - - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("failed to read the %q namespace: %v", resource.DefaultNamespaceName, err) - } - if read == nil && errors.Is(err, storage.ErrNotFound) { - nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"}) - if err != nil { - return err - } - - // create a default namespace in default partition - nsID := &pbresource.ID{ - Type: pbtenancy.NamespaceType, - Name: resource.DefaultNamespaceName, - Tenancy: resource.DefaultPartitionedTenancy(), - Uid: ulid.Make().String(), - } - - _, err = b.WriteCAS(ctx, &pbresource.Resource{ - Id: nsID, - Generation: ulid.Make().String(), - Data: nsData, - Metadata: map[string]string{ - "generated_at": time.Now().Format(time.RFC3339), - }, - }) - - if err != nil { - return fmt.Errorf("failed to create the %q namespace: %v", resource.DefaultNamespaceName, err) - } - } - s.logger.Info("Created", "namespace", resource.DefaultNamespaceName) - return nil -} diff --git a/agent/consul/leader_ce.go b/agent/consul/leader_ce.go deleted file mode 100644 index 2d67b7bdedd8f..0000000000000 --- a/agent/consul/leader_ce.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package consul - -import ( - "context" - - "github.com/hashicorp/consul/internal/storage" -) - -func (s *Server) createDefaultPartition(ctx context.Context, b storage.Backend) error { - // no-op - return nil -} diff --git a/agent/consul/leader_ce_test.go b/agent/consul/leader_ce_test.go index 86da505c3a08e..7ff6f64ee193d 100644 --- a/agent/consul/leader_ce_test.go +++ b/agent/consul/leader_ce_test.go @@ -1,23 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/storage" - libserf "github.com/hashicorp/consul/lib/serf" - "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" - "github.com/hashicorp/consul/testrpc" -) +import libserf "github.com/hashicorp/consul/lib/serf" func updateSerfTags(s *Server, key, value string) { libserf.UpdateTag(s.serfLAN, key, value) @@ -26,41 +15,3 @@ func updateSerfTags(s *Server, key, value string) { libserf.UpdateTag(s.serfWAN, key, value) } } - -func TestServer_InitTenancy(t *testing.T) { - t.Parallel() - - _, conf := testServerConfig(t) - deps := newDefaultDeps(t, conf) - deps.Experiments = []string{"v2tenancy"} - deps.Registry = NewTypeRegistry() - - s, err := newServerWithDeps(t, conf, deps) - require.NoError(t, err) - - // first initTenancy call happens here - waitForLeaderEstablishment(t, s) - testrpc.WaitForLeader(t, s.RPC, "dc1") - - nsID := &pbresource.ID{ - Type: pbtenancy.NamespaceType, - Tenancy: resource.DefaultPartitionedTenancy(), - Name: resource.DefaultNamespaceName, - } - - ns, err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID) - require.NoError(t, err) - require.Equal(t, resource.DefaultNamespaceName, ns.Id.Name) - - // explicitly call initiTenancy to verify we do not re-create namespace - err = s.initTenancy(context.Background(), s.storageBackend) - require.NoError(t, err) - - // read again - actual, err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID) - require.NoError(t, err) - - require.Equal(t, ns.Id.Uid, actual.Id.Uid) - require.Equal(t, ns.Generation, actual.Generation) - require.Equal(t, ns.Version, actual.Version) -} diff --git a/agent/consul/leader_connect.go b/agent/consul/leader_connect.go index 794820786f576..f872508bbcf90 100644 --- a/agent/consul/leader_connect.go +++ b/agent/consul/leader_connect.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index 92cdf40a6abd2..717c9ff0b2544 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -1436,7 +1436,6 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au if err != nil { return nil, err } - c.logger.Trace("authorizing and signing cert", "spiffeID", spiffeID) // Perform authorization. 
var authzContext acl.AuthorizerContext @@ -1455,11 +1454,6 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+ "we are %s", v.Datacenter, dc) } - case *connect.SpiffeIDWorkloadIdentity: - v.GetEnterpriseMeta().FillAuthzContext(&authzContext) - if err := allow.IdentityWriteAllowed(v.WorkloadIdentity, &authzContext); err != nil { - return nil, err - } case *connect.SpiffeIDAgent: v.GetEnterpriseMeta().FillAuthzContext(&authzContext) if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil { @@ -1493,7 +1487,6 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au "we are %s", v.Datacenter, dc) } default: - c.logger.Trace("spiffe ID type is not expected", "spiffeID", spiffeID, "spiffeIDType", v) return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, mesh-gateway, or agent ID") } @@ -1520,7 +1513,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent) serverID, isServer := spiffeID.(*connect.SpiffeIDServer) mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway) - wID, isWorkloadIdentity := spiffeID.(*connect.SpiffeIDWorkloadIdentity) var entMeta acl.EnterpriseMeta switch { @@ -1530,12 +1522,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne "we are %s", serviceID.Host, signingID.Host()) } entMeta.Merge(serviceID.GetEnterpriseMeta()) - case isWorkloadIdentity: - if !signingID.CanSign(spiffeID) { - return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+ - "we are %s", wID.TrustDomain, signingID.Host()) - } - entMeta.Merge(wID.GetEnterpriseMeta()) + case isMeshGateway: if !signingID.CanSign(spiffeID) { return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+ @@ -1658,9 +1645,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne case isService: reply.Service = serviceID.Service reply.ServiceURI = cert.URIs[0].String() - case isWorkloadIdentity: - reply.WorkloadIdentity = wID.WorkloadIdentity - reply.WorkloadIdentityURI = cert.URIs[0].String() case isMeshGateway: reply.Kind = structs.ServiceKindMeshGateway reply.KindURI = cert.URIs[0].String() diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go index e372c010a7064..e1c2cf8506c29 100644 --- a/agent/consul/leader_connect_ca_test.go +++ b/agent/consul/leader_connect_ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -566,7 +566,7 @@ func TestCAManager_Initialize_Logging(t *testing.T) { deps := newDefaultDeps(t, conf1) deps.Logger = logger - s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger, nil) + s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger) require.NoError(t, err) defer s1.Shutdown() testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -1317,12 +1317,6 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) { Host: "test-host", Partition: "test-partition", }.URI() - identityURL := connect.SpiffeIDWorkloadIdentity{ - TrustDomain: "test-trust-domain", - Partition: "test-partition", - Namespace: "test-namespace", - WorkloadIdentity: "test-workload-identity", - }.URI() tests := []struct { name string @@ -1418,15 +1412,6 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) { } }, }, - { - name: "err_identity_write_not_allowed", - expectErr: "Permission denied", - getCSR: func() *x509.CertificateRequest { - return &x509.CertificateRequest{ - URIs: []*url.URL{identityURL}, - } - }, - }, } for _, tc := range tests { diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index dbee808de99eb..e57107a242f52 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_federation_state_ae.go b/agent/consul/leader_federation_state_ae.go index 870dc5460e2b0..fa46bea770b3a 100644 --- a/agent/consul/leader_federation_state_ae.go +++ b/agent/consul/leader_federation_state_ae.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_federation_state_ae_test.go b/agent/consul/leader_federation_state_ae_test.go index ca5ac47b7a974..ef3333da31dac 100644 --- a/agent/consul/leader_federation_state_ae_test.go +++ b/agent/consul/leader_federation_state_ae_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_intentions.go b/agent/consul/leader_intentions.go index 52736838aec81..cf0844d3ff5cd 100644 --- a/agent/consul/leader_intentions.go +++ b/agent/consul/leader_intentions.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_intentions_ce.go b/agent/consul/leader_intentions_ce.go index 2d997b2329498..83880d31de371 100644 --- a/agent/consul/leader_intentions_ce.go +++ b/agent/consul/leader_intentions_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/leader_intentions_ce_test.go b/agent/consul/leader_intentions_ce_test.go index e97c45a320cf1..7d144fb2e98e7 100644 --- a/agent/consul/leader_intentions_ce_test.go +++ b/agent/consul/leader_intentions_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/leader_intentions_test.go b/agent/consul/leader_intentions_test.go index fc868bc747fc1..2de1b97dd7e10 100644 --- a/agent/consul/leader_intentions_test.go +++ b/agent/consul/leader_intentions_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_log_verification.go b/agent/consul/leader_log_verification.go index 32a23dd3f5c06..ef32ce17904ea 100644 --- a/agent/consul/leader_log_verification.go +++ b/agent/consul/leader_log_verification.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_metrics.go b/agent/consul/leader_metrics.go index e210b2ffd9333..188e409e3bbd1 100644 --- a/agent/consul/leader_metrics.go +++ b/agent/consul/leader_metrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_metrics_test.go b/agent/consul/leader_metrics_test.go index e3636e1bcf8c9..96e7a0d75d971 100644 --- a/agent/consul/leader_metrics_test.go +++ b/agent/consul/leader_metrics_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 0f58ed08f491e..32f220164b64c 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index ae5d2ae83e813..0787115ca70ac 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -1426,7 +1426,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p2})) // connect the stream - mst1, err := s2.peerStreamServer.Tracker.Connected(s2PeerID1) + mst1, err := s2.peeringServer.Tracker.Connected(s2PeerID1) require.NoError(t, err) // mimic tracking exported services @@ -1437,7 +1437,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { }) // connect the stream - mst2, err := s2.peerStreamServer.Tracker.Connected(s2PeerID2) + mst2, err := s2.peeringServer.Tracker.Connected(s2PeerID2) require.NoError(t, err) // mimic tracking exported services diff --git a/agent/consul/leader_registrator_v1.go b/agent/consul/leader_registrator_v1.go deleted file mode 100644 index 6c131a44d9b6c..0000000000000 --- a/agent/consul/leader_registrator_v1.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "reflect" - "strconv" - "strings" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/serf/serf" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/consul/fsm" - "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/types" -) - -var _ ConsulRegistrator = (*V1ConsulRegistrator)(nil) - -type V1ConsulRegistrator struct { - Datacenter string - FSM *fsm.FSM - Logger hclog.Logger - NodeName string - - RaftApplyFunc func(t structs.MessageType, msg any) (any, error) -} - -// HandleAliveMember is used to ensure the node -// is registered, with a passing health check. -func (r V1ConsulRegistrator) HandleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, joinServer func(m serf.Member, parts *metadata.Server) error) error { - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - // Register consul service if a server - var service *structs.NodeService - if valid, parts := metadata.IsConsulServer(member); valid { - service = &structs.NodeService{ - ID: structs.ConsulServiceID, - Service: structs.ConsulServiceName, - Port: parts.Port, - Weights: &structs.Weights{ - Passing: 1, - Warning: 1, - }, - EnterpriseMeta: *nodeEntMeta, - Meta: map[string]string{ - // DEPRECATED - remove nonvoter in favor of read_replica in a future version of consul - "non_voter": strconv.FormatBool(member.Tags["nonvoter"] == "1"), - "read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"), - "raft_version": strconv.Itoa(parts.RaftVersion), - "serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10), - "serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10), - "serf_protocol_max": strconv.FormatUint(uint64(member.ProtocolMax), 10), - "version": parts.Build.String(), - }, - } - - if parts.ExternalGRPCPort > 0 { - service.Meta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort) - } - if parts.ExternalGRPCTLSPort > 0 { - service.Meta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort) - } - - // Attempt to join the consul server - if err := joinServer(member, parts); err != nil { - return err - } - } - - // Check if the node exists - state := r.FSM.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - if node != nil && node.Address == member.Addr.String() { - // Check if the associated service is available - if service != nil { - match := false - _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - if services != nil { - for id, serv := range services.Services { - if id == service.ID { - // If metadata are different, be sure to update it - match = reflect.DeepEqual(serv.Meta, service.Meta) - } - } - } - if !match { - goto AFTER_CHECK - } - } - - // Check if the serfCheck is in the passing state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - for _, check := range checks { - if check.CheckID == structs.SerfCheckID && check.Status == api.HealthPassing { - return nil - } - } - } -AFTER_CHECK: - r.Logger.Info("member joined, marking health alive", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // Get consul version from serf 
member - // add this as node meta in catalog register request - buildVersion, err := metadata.Build(&member) - if err != nil { - return err - } - - // Register with the catalog. - req := structs.RegisterRequest{ - Datacenter: r.Datacenter, - Node: member.Name, - ID: types.NodeID(member.Tags["id"]), - Address: member.Addr.String(), - Service: service, - Check: &structs.HealthCheck{ - Node: member.Name, - CheckID: structs.SerfCheckID, - Name: structs.SerfCheckName, - Status: api.HealthPassing, - Output: structs.SerfCheckAliveOutput, - }, - EnterpriseMeta: *nodeEntMeta, - NodeMeta: map[string]string{ - structs.MetaConsulVersion: buildVersion.String(), - }, - } - if node != nil { - req.TaggedAddresses = node.TaggedAddresses - req.NodeMeta = node.Meta - } - - _, err = r.RaftApplyFunc(structs.RegisterRequestType, &req) - return err -} - -// HandleFailedMember is used to mark the node's status -// as being critical, along with all checks as unknown. -func (r V1ConsulRegistrator) HandleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - // Check if the node exists - state := r.FSM.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - - if node == nil { - r.Logger.Info("ignoring failed event for member because it does not exist in the catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - - if node.Address == member.Addr.String() { - // Check if the serfCheck is in the critical state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - for _, check := range checks { - if check.CheckID == structs.SerfCheckID && check.Status == api.HealthCritical { - return nil - } - } - } - r.Logger.Info("member failed, marking health critical", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // Register with the catalog - req := structs.RegisterRequest{ - Datacenter: r.Datacenter, - Node: member.Name, - EnterpriseMeta: *nodeEntMeta, - ID: types.NodeID(member.Tags["id"]), - Address: member.Addr.String(), - Check: &structs.HealthCheck{ - Node: member.Name, - CheckID: structs.SerfCheckID, - Name: structs.SerfCheckName, - Status: api.HealthCritical, - Output: structs.SerfCheckFailedOutput, - }, - - // If there's existing information about the node, do not - // clobber it. - SkipNodeUpdate: true, - } - _, err = r.RaftApplyFunc(structs.RegisterRequestType, &req) - return err -} - -// HandleLeftMember is used to handle members that gracefully -// left. They are deregistered if necessary. -func (r V1ConsulRegistrator) HandleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - return r.handleDeregisterMember("left", member, nodeEntMeta, removeServerFunc) -} - -// HandleReapMember is used to handle members that have been -// reaped after a prolonged failure. They are deregistered. 
-func (r V1ConsulRegistrator) HandleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - return r.handleDeregisterMember("reaped", member, nodeEntMeta, removeServerFunc) -} - -// handleDeregisterMember is used to deregister a member of a given reason -func (r V1ConsulRegistrator) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - // Do not deregister ourself. This can only happen if the current leader - // is leaving. Instead, we should allow a follower to take-over and - // deregister us later. - // - // TODO(partitions): check partitions here too? server names should be unique in general though - if strings.EqualFold(member.Name, r.NodeName) { - r.Logger.Warn("deregistering self should be done by follower", - "name", r.NodeName, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - - // Remove from Raft peers if this was a server - if valid, _ := metadata.IsConsulServer(member); valid { - if err := removeServerFunc(member); err != nil { - return err - } - } - - // Check if the node does not exist - state := r.FSM.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) - if err != nil { - return err - } - if node == nil { - return nil - } - - // Deregister the node - r.Logger.Info("deregistering member", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - "reason", reason, - ) - req := structs.DeregisterRequest{ - Datacenter: r.Datacenter, - Node: member.Name, - EnterpriseMeta: *nodeEntMeta, - } - _, err = r.RaftApplyFunc(structs.DeregisterRequestType, &req) - return err -} diff --git a/agent/consul/leader_registrator_v1_test.go b/agent/consul/leader_registrator_v1_test.go deleted file mode 100644 index 276e3b7c8ed8f..0000000000000 --- a/agent/consul/leader_registrator_v1_test.go +++ /dev/null @@ -1,887 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "context" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/hashicorp/serf/serf" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/freeport" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/testrpc" -) - -func TestLeader_RegisterMember(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - // Try to join - joinLAN(t, c1, s1) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Client should be registered - state := s1.fsm.State() - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatal("client not registered") - } - }) - - // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(checks) != 1 { - t.Fatalf("client missing check") - } - if checks[0].CheckID != structs.SerfCheckID { - t.Fatalf("bad check: %v", checks[0]) - } - if checks[0].Name != structs.SerfCheckName { - t.Fatalf("bad check: %v", checks[0]) - } - if checks[0].Status != api.HealthPassing { - t.Fatalf("bad check: %v", checks[0]) - } - - // Server should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatalf("server not registered") - } - }) - - // Service should be registered - _, services, err := state.NodeServices(nil, s1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services.Services["consul"]; !ok { - t.Fatalf("consul service not registered: %v", services) - } -} - -func TestLeader_FailedMember(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Try to join - joinLAN(t, c1, s1) - - // Fail the member - c1.Shutdown() - - // Should be registered - state := s1.fsm.State() - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatal("client not registered") - } - }) - - // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(checks) != 1 { - t.Fatalf("client missing check") - } - if checks[0].CheckID != structs.SerfCheckID { - t.Fatalf("bad check: %v", checks[0]) - } - if checks[0].Name != structs.SerfCheckName { - t.Fatalf("bad check: %v", checks[0]) - } - - retry.Run(t, func(r *retry.R) { - _, checks, err = 
state.NodeChecks(nil, c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if len(checks) != 1 { - r.Fatalf("client missing check") - } - if got, want := checks[0].Status, api.HealthCritical; got != want { - r.Fatalf("got status %q want %q", got, want) - } - }) -} - -func TestLeader_LeftMember(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - // Try to join - joinLAN(t, c1, s1) - - state := s1.fsm.State() - - // Should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - require.NoError(r, err) - require.NotNil(r, node, "client not registered") - }) - - // Node should leave - c1.Leave() - c1.Shutdown() - - // Should be deregistered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - require.NoError(r, err) - require.Nil(r, node, "client still registered") - }) -} - -func TestLeader_ReapMember(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - // Try to join - joinLAN(t, c1, s1) - - state := s1.fsm.State() - - // Should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - require.NoError(r, err) - require.NotNil(r, node, "client not registered") - }) - - // Simulate a node reaping - mems := s1.LANMembersInAgentPartition() - var c1mem serf.Member - for _, m := range mems { - if m.Name == c1.config.NodeName { - c1mem = m - c1mem.Status = StatusReap - break - } - } - s1.reconcileCh <- c1mem - - // Should be deregistered; we have to poll quickly here because - // anti-entropy will put it back. 
- reaped := false - for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - require.NoError(t, err) - if node == nil { - reaped = true - break - } - } - if !reaped { - t.Fatalf("client should not be registered") - } -} - -func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - - run := func(t *testing.T, status serf.MemberStatus, nameFn func(string) string) { - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - nodeName := s1.config.NodeName - if nameFn != nil { - nodeName = nameFn(nodeName) - } - - state := s1.fsm.State() - - // Should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(nodeName, nil, "") - require.NoError(r, err) - require.NotNil(r, node, "server not registered") - }) - - // Simulate THIS node reaping or leaving - mems := s1.LANMembersInAgentPartition() - var s1mem serf.Member - for _, m := range mems { - if strings.EqualFold(m.Name, nodeName) { - s1mem = m - s1mem.Status = status - s1mem.Name = nodeName - break - } - } - s1.reconcileCh <- s1mem - - // Should NOT be deregistered; we have to poll quickly here because - // anti-entropy will put it back if it did get deleted. - reaped := false - for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(nodeName, nil, "") - require.NoError(t, err) - if node == nil { - reaped = true - break - } - } - if reaped { - t.Fatalf("server should still be registered") - } - } - - t.Run("original name", func(t *testing.T) { - t.Parallel() - t.Run("left", func(t *testing.T) { - run(t, serf.StatusLeft, nil) - }) - t.Run("reap", func(t *testing.T) { - run(t, StatusReap, nil) - }) - }) - - t.Run("uppercased name", func(t *testing.T) { - t.Parallel() - t.Run("left", func(t *testing.T) { - run(t, serf.StatusLeft, strings.ToUpper) - }) - t.Run("reap", func(t *testing.T) { - run(t, StatusReap, strings.ToUpper) - }) - }) -} - -func TestLeader_CheckServersMeta(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - t.Parallel() - - ports := freeport.GetN(t, 2) // s3 grpc, s3 grpc_tls - - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = true - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, s2 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = false - }) - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - dir3, s3 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = false - c.GRPCPort = ports[0] - c.GRPCTLSPort = ports[1] - }) - defer os.RemoveAll(dir3) - defer s3.Shutdown() - - // Try to join - joinLAN(t, s1, s2) - joinLAN(t, s1, s3) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s2.RPC, "dc1") - testrpc.WaitForLeader(t, s3.RPC, "dc1") - state := s1.fsm.State() - - consulService := 
&structs.NodeService{ - ID: "consul", - Service: "consul", - } - // s3 should be registered - retry.Run(t, func(r *retry.R) { - _, service, err := state.NodeService(nil, s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if service == nil { - r.Fatal("client not registered") - } - if service.Meta["non_voter"] != "false" { - r.Fatalf("Expected to be non_voter == false, was: %s", service.Meta["non_voter"]) - } - }) - - member := serf.Member{} - for _, m := range s1.serfLAN.Members() { - if m.Name == s3.config.NodeName { - member = m - member.Tags = make(map[string]string) - for key, value := range m.Tags { - member.Tags[key] = value - } - } - } - if member.Name != s3.config.NodeName { - t.Fatal("could not find node in serf members") - } - versionToExpect := "19.7.9" - - retry.Run(t, func(r *retry.R) { - // DEPRECATED - remove nonvoter tag in favor of read_replica in a future version of consul - member.Tags["nonvoter"] = "1" - member.Tags["read_replica"] = "1" - member.Tags["build"] = versionToExpect - err := s1.registrator.HandleAliveMember(member, nil, s1.joinConsulServer) - if err != nil { - r.Fatalf("Unexpected error :%v", err) - } - _, service, err := state.NodeService(nil, s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if service == nil { - r.Fatal("client not registered") - } - // DEPRECATED - remove non_voter in favor of read_replica in a future version of consul - if service.Meta["non_voter"] != "true" { - r.Fatalf("Expected to be non_voter == true, was: %s", service.Meta["non_voter"]) - } - if service.Meta["read_replica"] != "true" { - r.Fatalf("Expected to be read_replica == true, was: %s", service.Meta["non_voter"]) - } - newVersion := service.Meta["version"] - if newVersion != versionToExpect { - r.Fatalf("Expected version to be updated to %s, was %s", versionToExpect, newVersion) - } - grpcPort := service.Meta["grpc_port"] - if grpcPort != strconv.Itoa(ports[0]) { - r.Fatalf("Expected grpc port to be %d, was %s", ports[0], grpcPort) - } - grpcTLSPort := service.Meta["grpc_tls_port"] - if grpcTLSPort != strconv.Itoa(ports[1]) { - r.Fatalf("Expected grpc tls port to be %d, was %s", ports[1], grpcTLSPort) - } - }) -} - -func TestLeader_ReapServer(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = true - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, s2 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = false - }) - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - dir3, s3 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "allow" - c.Bootstrap = false - }) - defer os.RemoveAll(dir3) - defer s3.Shutdown() - - // Try to join - joinLAN(t, s1, s2) - joinLAN(t, s1, s3) - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s2.RPC, "dc1") - testrpc.WaitForLeader(t, s3.RPC, "dc1") - state := s1.fsm.State() - - // s3 should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := 
state.GetNode(s3.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatal("client not registered") - } - }) - - // call reconcileReaped with a map that does not contain s3 - knownMembers := make(map[string]struct{}) - knownMembers[s1.config.NodeName] = struct{}{} - knownMembers[s2.config.NodeName] = struct{}{} - - err := s1.reconcileReaped(knownMembers, nil) - - if err != nil { - t.Fatalf("Unexpected error :%v", err) - } - // s3 should be deregistered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s3.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node != nil { - r.Fatalf("server with id %v should not be registered", s3.config.NodeID) - } - }) - -} - -func TestLeader_Reconcile_ReapMember(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Register a non-existing member - dead := structs.RegisterRequest{ - Datacenter: s1.config.Datacenter, - Node: "no-longer-around", - Address: "127.1.1.1", - Check: &structs.HealthCheck{ - Node: "no-longer-around", - CheckID: structs.SerfCheckID, - Name: structs.SerfCheckName, - Status: api.HealthCritical, - }, - WriteRequest: structs.WriteRequest{ - Token: "root", - }, - } - var out struct{} - if err := s1.RPC(context.Background(), "Catalog.Register", &dead, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Force a reconciliation - if err := s1.reconcile(); err != nil { - t.Fatalf("err: %v", err) - } - - // Node should be gone - state := s1.fsm.State() - _, node, err := state.GetNode("no-longer-around", nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if node != nil { - t.Fatalf("client registered") - } -} - -func TestLeader_Reconcile(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - c.ACLsEnabled = true - c.ACLInitialManagementToken = "root" - c.ACLResolverSettings.ACLDefaultPolicy = "deny" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - // Join before we have a leader, this should cause a reconcile! - joinLAN(t, c1, s1) - - // Should not be registered - state := s1.fsm.State() - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if node != nil { - t.Fatalf("client registered") - } - - // Should be registered - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatal("client not registered") - } - }) -} - -func TestLeader_Reconcile_Races(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - dir2, c1 := testClient(t) - defer os.RemoveAll(dir2) - defer c1.Shutdown() - - joinLAN(t, c1, s1) - - // Wait for the server to reconcile the client and register it. 
- state := s1.fsm.State() - var nodeAddr string - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node == nil { - r.Fatal("client not registered") - } - nodeAddr = node.Address - }) - - // Add in some metadata via the catalog (as if the agent synced it - // there). We also set the serfHealth check to failing so the reconcile - // will attempt to flip it back - req := structs.RegisterRequest{ - Datacenter: s1.config.Datacenter, - Node: c1.config.NodeName, - ID: c1.config.NodeID, - Address: nodeAddr, - NodeMeta: map[string]string{"hello": "world"}, - Check: &structs.HealthCheck{ - Node: c1.config.NodeName, - CheckID: structs.SerfCheckID, - Name: structs.SerfCheckName, - Status: api.HealthCritical, - Output: "", - }, - } - var out struct{} - if err := s1.RPC(context.Background(), "Catalog.Register", &req, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Force a reconcile and make sure the metadata stuck around. - if err := s1.reconcile(); err != nil { - t.Fatalf("err: %v", err) - } - _, node, err := state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if node == nil { - t.Fatalf("bad") - } - if hello, ok := node.Meta["hello"]; !ok || hello != "world" { - t.Fatalf("bad") - } - - // Fail the member and wait for the health to go critical. - c1.Shutdown() - retry.Run(t, func(r *retry.R) { - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if len(checks) != 1 { - r.Fatalf("client missing check") - } - if got, want := checks[0].Status, api.HealthCritical; got != want { - r.Fatalf("got state %q want %q", got, want) - } - }) - - // Make sure the metadata didn't get clobbered. - _, node, err = state.GetNode(c1.config.NodeName, nil, "") - if err != nil { - t.Fatalf("err: %v", err) - } - if node == nil { - t.Fatalf("bad") - } - if hello, ok := node.Meta["hello"]; !ok || hello != "world" { - t.Fatalf("bad") - } -} - -func TestLeader_LeftServer(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, s2 := testServerDCBootstrap(t, "dc1", false) - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - dir3, s3 := testServerDCBootstrap(t, "dc1", false) - defer os.RemoveAll(dir3) - defer s3.Shutdown() - - // Put s1 last so we don't trigger a leader election. - servers := []*Server{s2, s3, s1} - - // Try to join - joinLAN(t, s2, s1) - joinLAN(t, s3, s1) - for _, s := range servers { - retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) }) - } - - // Kill any server - servers[0].Shutdown() - - // Force remove the non-leader (transition to left state) - if err := servers[1].RemoveFailedNode(servers[0].config.NodeName, false, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait until the remaining servers show only 2 peers. 
- for _, s := range servers[1:] { - retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) }) - } - s1.Shutdown() -} - -func TestLeader_LeftLeader(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, s2 := testServerDCBootstrap(t, "dc1", false) - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - dir3, s3 := testServerDCBootstrap(t, "dc1", false) - defer os.RemoveAll(dir3) - defer s3.Shutdown() - servers := []*Server{s1, s2, s3} - - // Try to join - joinLAN(t, s2, s1) - joinLAN(t, s3, s1) - - for _, s := range servers { - retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) }) - } - - // Kill the leader! - var leader *Server - for _, s := range servers { - if s.IsLeader() { - leader = s - break - } - } - if leader == nil { - t.Fatalf("Should have a leader") - } - if !leader.isReadyForConsistentReads() { - t.Fatalf("Expected leader to be ready for consistent reads ") - } - leader.Leave() - if leader.isReadyForConsistentReads() { - t.Fatalf("Expected consistent read state to be false ") - } - leader.Shutdown() - time.Sleep(100 * time.Millisecond) - - var remain *Server - for _, s := range servers { - if s == leader { - continue - } - remain = s - retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) }) - } - - // Verify the old leader is deregistered - state := remain.fsm.State() - retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(leader.config.NodeName, nil, "") - if err != nil { - r.Fatalf("err: %v", err) - } - if node != nil { - r.Fatal("leader should be deregistered") - } - }) -} - -func TestLeader_MultiBootstrap(t *testing.T) { - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - dir2, s2 := testServer(t) - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - servers := []*Server{s1, s2} - - // Try to join - joinLAN(t, s2, s1) - - for _, s := range servers { - retry.Run(t, func(r *retry.R) { - if got, want := len(s.serfLAN.Members()), 2; got != want { - r.Fatalf("got %d peers want %d", got, want) - } - }) - } - - // Ensure we don't have multiple raft peers - for _, s := range servers { - peers, _ := s.autopilot.NumVoters() - if peers != 1 { - t.Fatalf("should only have 1 raft peer!") - } - } -} diff --git a/agent/consul/leader_registrator_v2.go b/agent/consul/leader_registrator_v2.go deleted file mode 100644 index 97465e10d1e7b..0000000000000 --- a/agent/consul/leader_registrator_v2.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/serf/serf" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/testing/protocmp" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/types" -) - -const ( - consulWorkloadPrefix = "consul-server-" - consulPortNameServer = "server" -) - -var _ ConsulRegistrator = (*V2ConsulRegistrator)(nil) - -var resourceCmpOptions = []cmp.Option{ - protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), - protocmp.IgnoreFields(&pbresource.ID{}, "uid"), - protocmp.Transform(), - // Stringify any type passed to the sorter so that we can reliably compare most values. - cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), -} - -type V2ConsulRegistrator struct { - Logger hclog.Logger - NodeName string - EntMeta *acl.EnterpriseMeta - - Client pbresource.ResourceServiceClient -} - -// HandleAliveMember is used to ensure the server is registered as a Workload -// with a passing health check. -func (r V2ConsulRegistrator) HandleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, joinServer func(m serf.Member, parts *metadata.Server) error) error { - valid, parts := metadata.IsConsulServer(member) - if !valid { - return nil - } - - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - // Attempt to join the consul server, regardless of the existing catalog state - if err := joinServer(member, parts); err != nil { - return err - } - - r.Logger.Info("member joined, creating catalog entries", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - workloadResource, err := r.createWorkloadFromMember(member, parts, nodeEntMeta) - if err != nil { - return err - } - - // Check if the Workload already exists and if it's the same - res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadResource.Id}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("error checking for existing Workload %s: %w", workloadResource.Id.Name, err) - } - - if err == nil { - existingWorkload := res.GetResource() - - r.Logger.Debug("existing Workload matching the member found", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // If the Workload is identical, move to updating the health status - if cmp.Equal(workloadResource, existingWorkload, resourceCmpOptions...) 
{ - r.Logger.Debug("no updates to perform on member Workload", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - goto HEALTHSTATUS - } - - // If the existing Workload different, add the existing Version into the patch for CAS write - workloadResource.Id = existingWorkload.Id - workloadResource.Version = existingWorkload.Version - } - - if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: workloadResource}); err != nil { - return fmt.Errorf("failed to write Workload %s: %w", workloadResource.Id.Name, err) - } - - r.Logger.Info("updated consul Workload in catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - -HEALTHSTATUS: - hsResource, err := r.createHealthStatusFromMember(member, workloadResource.Id, true, nodeEntMeta) - if err != nil { - return err - } - - // Check if the HealthStatus already exists and if it's the same - res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err) - } - - if err == nil { - existingHS := res.GetResource() - - r.Logger.Debug("existing HealthStatus matching the member found", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // If the HealthStatus is identical, we're done. - if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) { - r.Logger.Debug("no updates to perform on member HealthStatus", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - - // If the existing HealthStatus is different, add the Version to the patch for CAS write. - hsResource.Id = existingHS.Id - hsResource.Version = existingHS.Version - } - - if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil { - return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err) - } - r.Logger.Info("updated consul HealthStatus in catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil -} - -func (r V2ConsulRegistrator) createWorkloadFromMember(member serf.Member, parts *metadata.Server, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) { - workloadMeta := map[string]string{ - "read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"), - "raft_version": strconv.Itoa(parts.RaftVersion), - "serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10), - "serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10), - "serf_protocol_max": strconv.FormatUint(uint64(member.ProtocolMax), 10), - "version": parts.Build.String(), - } - - if parts.ExternalGRPCPort > 0 { - workloadMeta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort) - } - if parts.ExternalGRPCTLSPort > 0 { - workloadMeta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort) - } - - workload := &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: member.Addr.String(), Ports: []string{consulPortNameServer}}, - }, - // Don't include identity since Consul is not routable through the mesh. - // Don't include locality because these values are not passed along through serf, and they are probably - // different from the leader's values. 
- Ports: map[string]*pbcatalog.WorkloadPort{ - consulPortNameServer: { - Port: uint32(parts.Port), - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - }, - // TODO: add other agent ports - }, - } - - workloadData, err := anypb.New(workload) - if err != nil { - return nil, fmt.Errorf("could not convert Workload to 'any' type: %w", err) - } - - workloadId := &pbresource.ID{ - Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() - - return &pbresource.Resource{ - Id: workloadId, - Data: workloadData, - Metadata: workloadMeta, - }, nil -} - -func (r V2ConsulRegistrator) createHealthStatusFromMember(member serf.Member, workloadId *pbresource.ID, passing bool, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) { - hs := &pbcatalog.HealthStatus{ - Type: string(structs.SerfCheckID), - Description: structs.SerfCheckName, - } - - if passing { - hs.Status = pbcatalog.Health_HEALTH_PASSING - hs.Output = structs.SerfCheckAliveOutput - } else { - hs.Status = pbcatalog.Health_HEALTH_CRITICAL - hs.Output = structs.SerfCheckFailedOutput - } - - hsData, err := anypb.New(hs) - if err != nil { - return nil, fmt.Errorf("could not convert HealthStatus to 'any' type: %w", err) - } - - hsId := &pbresource.ID{ - Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), - Type: pbcatalog.HealthStatusType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - hsId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() - - return &pbresource.Resource{ - Id: hsId, - Data: hsData, - Owner: workloadId, - }, nil -} - -// HandleFailedMember is used to mark the workload's associated HealthStatus. -func (r V2ConsulRegistrator) HandleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { - if valid, _ := metadata.IsConsulServer(member); !valid { - return nil - } - - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - r.Logger.Info("member failed", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // Validate that the associated workload exists - workloadId := &pbresource.ID{ - Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() - - res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadId}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("error checking for existing Workload %s: %w", workloadId.Name, err) - } - if grpcNotFoundErr(err) { - r.Logger.Info("ignoring failed event for member because it does not exist in the catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - // Overwrite the workload ID with the one that has UID populated. 
- existingWorkload := res.GetResource() - - hsResource, err := r.createHealthStatusFromMember(member, existingWorkload.Id, false, nodeEntMeta) - if err != nil { - return err - } - - res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err) - } - - if err == nil { - existingHS := res.GetResource() - r.Logger.Debug("existing HealthStatus matching the member found", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - - // If the HealthStatus is identical, we're done. - if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) { - r.Logger.Debug("no updates to perform on member HealthStatus", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - - // If the existing HealthStatus is different, add the Version to the patch for CAS write. - hsResource.Id = existingHS.Id - hsResource.Version = existingHS.Version - } - - if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil { - return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err) - } - r.Logger.Info("updated consul HealthStatus in catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil -} - -// HandleLeftMember is used to handle members that gracefully -// left. They are removed if necessary. -func (r V2ConsulRegistrator) HandleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - return r.handleDeregisterMember("left", member, nodeEntMeta, removeServerFunc) -} - -// HandleReapMember is used to handle members that have been -// reaped after a prolonged failure. They are removed from the catalog. -func (r V2ConsulRegistrator) HandleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - return r.handleDeregisterMember("reaped", member, nodeEntMeta, removeServerFunc) -} - -// handleDeregisterMember is used to remove a member of a given reason -func (r V2ConsulRegistrator) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { - if valid, _ := metadata.IsConsulServer(member); !valid { - return nil - } - - if nodeEntMeta == nil { - nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() - } - - r.Logger.Info("removing member", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - "reason", reason, - ) - - if err := removeServerFunc(member); err != nil { - return err - } - - // Do not remove our self. This can only happen if the current leader - // is leaving. Instead, we should allow a follower to take-over and - // remove us later. 
- if strings.EqualFold(member.Name, r.NodeName) && - strings.EqualFold(nodeEntMeta.PartitionOrDefault(), r.EntMeta.PartitionOrDefault()) { - r.Logger.Warn("removing self should be done by follower", - "name", r.NodeName, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - "reason", reason, - ) - return nil - } - - // Check if the workload exists - workloadID := &pbresource.ID{ - Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - workloadID.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() - - res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadID}) - if err != nil && !grpcNotFoundErr(err) { - return fmt.Errorf("error checking for existing Workload %s: %w", workloadID.Name, err) - } - if grpcNotFoundErr(err) { - r.Logger.Info("ignoring reap event for member because it does not exist in the catalog", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return nil - } - existingWorkload := res.GetResource() - - // The HealthStatus should be reaped automatically - if _, err := r.Client.Delete(context.TODO(), &pbresource.DeleteRequest{Id: existingWorkload.Id}); err != nil { - return fmt.Errorf("failed to delete Workload %s: %w", existingWorkload.Id.Name, err) - } - r.Logger.Info("deleted consul Workload", - "member", member.Name, - "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), - ) - return err -} - -func grpcNotFoundErr(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - return ok && s.Code() == codes.NotFound -} diff --git a/agent/consul/leader_registrator_v2_test.go b/agent/consul/leader_registrator_v2_test.go deleted file mode 100644 index c2729c47fff8a..0000000000000 --- a/agent/consul/leader_registrator_v2_test.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "fmt" - "net" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/serf/serf" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/agent/structs" - mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -var ( - fakeWrappedErr = fmt.Errorf("fake test error") -) - -type testCase struct { - name string - member serf.Member - nodeNameOverride string // This is used in the HandleLeftMember test to avoid deregistering ourself - - existingWorkload *pbresource.Resource - workloadReadErr bool - workloadWriteErr bool - workloadDeleteErr bool - - existingHealthStatus *pbresource.Resource - healthstatusReadErr bool - healthstatusWriteErr bool - - mutatedWorkload *pbresource.Resource // leaving one of these out means the mock expects not to have a write/delete called - mutatedHealthStatus *pbresource.Resource - expErr string -} - -func Test_HandleAliveMember(t *testing.T) { - t.Parallel() - - run := func(t *testing.T, tt testCase) { - client := mockpbresource.NewResourceServiceClient(t) - mockClient := client.EXPECT() - - // Build mock expectations based on the order of HandleAliveMember resource calls - setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr) - setupWriteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadWriteErr) - if !tt.workloadReadErr && !tt.workloadWriteErr { - // We expect to bail before this read if there is an error earlier in the function - setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr) - } - setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr) - - registrator := V2ConsulRegistrator{ - Logger: hclog.New(&hclog.LoggerOptions{}), - NodeName: "test-server-1", - Client: client, - } - - // Mock join function - var joinMockCalled bool - joinMock := func(_ serf.Member, _ *metadata.Server) error { - joinMockCalled = true - return nil - } - - err := registrator.HandleAliveMember(tt.member, acl.DefaultEnterpriseMeta(), joinMock) - if tt.expErr != "" { - require.Contains(t, err.Error(), tt.expErr) - } else { - require.NoError(t, err) - } - require.True(t, joinMockCalled, "the mock join function was not called") - } - - tests := []testCase{ - { - name: "New alive member", - member: getTestSerfMember(serf.StatusAlive), - mutatedWorkload: getTestWorkload(t), - mutatedHealthStatus: getTestHealthStatus(t, true), - }, - { - name: "No updates needed", - member: getTestSerfMember(serf.StatusAlive), - existingWorkload: getTestWorkload(t), - existingHealthStatus: getTestHealthStatus(t, true), - }, - { - name: "Existing Workload and HS need to be updated", - member: getTestSerfMember(serf.StatusAlive), - existingWorkload: getTestWorkloadWithPort(t, 8301), - existingHealthStatus: getTestHealthStatus(t, false), - mutatedWorkload: getTestWorkload(t), - mutatedHealthStatus: getTestHealthStatus(t, true), - }, - { - name: "Only the HS needs to be 
updated", - member: getTestSerfMember(serf.StatusAlive), - existingWorkload: getTestWorkload(t), - existingHealthStatus: getTestHealthStatus(t, false), - mutatedHealthStatus: getTestHealthStatus(t, true), - }, - { - name: "Error reading Workload", - member: getTestSerfMember(serf.StatusAlive), - workloadReadErr: true, - expErr: "error checking for existing Workload", - }, - { - name: "Error writing Workload", - member: getTestSerfMember(serf.StatusAlive), - workloadWriteErr: true, - mutatedWorkload: getTestWorkload(t), - expErr: "failed to write Workload", - }, - { - name: "Error reading HealthStatus", - member: getTestSerfMember(serf.StatusAlive), - healthstatusReadErr: true, - mutatedWorkload: getTestWorkload(t), - expErr: "error checking for existing HealthStatus", - }, - { - name: "Error writing HealthStatus", - member: getTestSerfMember(serf.StatusAlive), - healthstatusWriteErr: true, - mutatedWorkload: getTestWorkload(t), - mutatedHealthStatus: getTestHealthStatus(t, true), - expErr: "failed to write HealthStatus", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - run(t, tt) - }) - } -} - -func Test_HandleFailedMember(t *testing.T) { - t.Parallel() - - run := func(t *testing.T, tt testCase) { - client := mockpbresource.NewResourceServiceClient(t) - mockClient := client.EXPECT() - - // Build mock expectations based on the order of HandleFailed resource calls - setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr) - if !tt.workloadReadErr && tt.existingWorkload != nil { - // We expect to bail before this read if there is an error earlier in the function or there is no workload - setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr) - } - setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr) - - registrator := V2ConsulRegistrator{ - Logger: hclog.New(&hclog.LoggerOptions{}), - NodeName: "test-server-1", - Client: client, - } - - err := registrator.HandleFailedMember(tt.member, acl.DefaultEnterpriseMeta()) - if tt.expErr != "" { - require.Contains(t, err.Error(), tt.expErr) - } else { - require.NoError(t, err) - } - } - - tests := []testCase{ - { - name: "Update non-existent HealthStatus", - member: getTestSerfMember(serf.StatusFailed), - existingWorkload: getTestWorkload(t), - mutatedHealthStatus: getTestHealthStatus(t, false), - }, - { - name: "Underlying Workload does not exist", - member: getTestSerfMember(serf.StatusFailed), - }, - { - name: "Update an existing HealthStatus", - member: getTestSerfMember(serf.StatusFailed), - existingWorkload: getTestWorkload(t), - existingHealthStatus: getTestHealthStatus(t, true), - mutatedHealthStatus: getTestHealthStatus(t, false), - }, - { - name: "HealthStatus is already critical - no updates needed", - member: getTestSerfMember(serf.StatusFailed), - existingWorkload: getTestWorkload(t), - existingHealthStatus: getTestHealthStatus(t, false), - }, - { - name: "Error reading Workload", - member: getTestSerfMember(serf.StatusFailed), - workloadReadErr: true, - expErr: "error checking for existing Workload", - }, - { - name: "Error reading HealthStatus", - member: getTestSerfMember(serf.StatusFailed), - existingWorkload: getTestWorkload(t), - healthstatusReadErr: true, - expErr: "error checking for existing HealthStatus", - }, - { - name: "Error writing HealthStatus", - member: getTestSerfMember(serf.StatusFailed), - existingWorkload: getTestWorkload(t), - healthstatusWriteErr: true, - 
mutatedHealthStatus: getTestHealthStatus(t, false), - expErr: "failed to write HealthStatus", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - run(t, tt) - }) - } -} - -// Test_HandleLeftMember also tests HandleReapMember, which shares the same core logic with some different logs. -func Test_HandleLeftMember(t *testing.T) { - t.Parallel() - - run := func(t *testing.T, tt testCase) { - client := mockpbresource.NewResourceServiceClient(t) - mockClient := client.EXPECT() - - // Build mock expectations based on the order of HandleLeftMember resource calls - // We check for the override, which we use to skip self de-registration - if tt.nodeNameOverride == "" { - setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr) - if tt.existingWorkload != nil && !tt.workloadReadErr { - setupDeleteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadDeleteErr) - } - } - - nodeName := "test-server-2" // This is not the same as the serf node so we don't deregister ourselves. - if tt.nodeNameOverride != "" { - nodeName = tt.nodeNameOverride - } - - registrator := V2ConsulRegistrator{ - Logger: hclog.New(&hclog.LoggerOptions{}), - NodeName: nodeName, // We change this so that we don't deregister ourselves - Client: client, - } - - // Mock remove function - var removeMockCalled bool - removeMock := func(_ serf.Member) error { - removeMockCalled = true - return nil - } - - err := registrator.HandleLeftMember(tt.member, acl.DefaultEnterpriseMeta(), removeMock) - if tt.expErr != "" { - require.Contains(t, err.Error(), tt.expErr) - } else { - require.NoError(t, err) - } - require.True(t, removeMockCalled, "the mock remove function was not called") - } - - tests := []testCase{ - { - name: "Remove member", - member: getTestSerfMember(serf.StatusAlive), - existingWorkload: getTestWorkload(t), - mutatedWorkload: getTestWorkload(t), - }, - { - name: "Don't deregister ourself", - member: getTestSerfMember(serf.StatusAlive), - nodeNameOverride: "test-server-1", - }, - { - name: "Don't do anything if the Workload is already gone", - member: getTestSerfMember(serf.StatusAlive), - }, - { - name: "Remove member regardless of Workload payload", - member: getTestSerfMember(serf.StatusAlive), - existingWorkload: getTestWorkloadWithPort(t, 8301), - mutatedWorkload: getTestWorkload(t), - }, - { - name: "Error reading Workload", - member: getTestSerfMember(serf.StatusAlive), - workloadReadErr: true, - expErr: "error checking for existing Workload", - }, - { - name: "Error deleting Workload", - member: getTestSerfMember(serf.StatusAlive), - workloadDeleteErr: true, - existingWorkload: getTestWorkloadWithPort(t, 8301), - mutatedWorkload: getTestWorkload(t), - expErr: "failed to delete Workload", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - run(t, tt) - }) - } -} - -func setupReadExpectation( - t *testing.T, - mockClient *mockpbresource.ResourceServiceClient_Expecter, - expectedId *pbresource.ID, - existingResource *pbresource.Resource, - sendErr bool) { - - if sendErr { - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, fakeWrappedErr). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.True(t, proto.Equal(expectedId, req.Id)) - }) - } else if existingResource != nil { - mockClient.Read(mock.Anything, mock.Anything). - Return(&pbresource.ReadResponse{ - Resource: existingResource, - }, nil). - Once().
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.True(t, proto.Equal(expectedId, req.Id)) - }) - } else { - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, status.Error(codes.NotFound, "not found")). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.True(t, proto.Equal(expectedId, req.Id)) - }) - } -} - -func setupWriteExpectation( - t *testing.T, - mockClient *mockpbresource.ResourceServiceClient_Expecter, - expectedResource *pbresource.Resource, - sendErr bool) { - - // If there is no expected resource, we take that to mean we don't expect any client writes. - if expectedResource == nil { - return - } - - if sendErr { - mockClient.Write(mock.Anything, mock.Anything). - Return(nil, fakeWrappedErr). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.WriteRequest) - require.True(t, proto.Equal(expectedResource, req.Resource)) - }) - } else { - mockClient.Write(mock.Anything, mock.Anything). - Return(nil, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.WriteRequest) - require.True(t, proto.Equal(expectedResource, req.Resource)) - }) - } -} - -func setupDeleteExpectation( - t *testing.T, - mockClient *mockpbresource.ResourceServiceClient_Expecter, - expectedResource *pbresource.Resource, - sendErr bool) { - - expectedId := expectedResource.GetId() - - if sendErr { - mockClient.Delete(mock.Anything, mock.Anything). - Return(nil, fakeWrappedErr). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.DeleteRequest) - require.True(t, proto.Equal(expectedId, req.Id)) - }) - } else { - mockClient.Delete(mock.Anything, mock.Anything). - Return(nil, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.DeleteRequest) - require.True(t, proto.Equal(expectedId, req.Id)) - }) - } -} - -func getTestWorkload(t *testing.T) *pbresource.Resource { - return getTestWorkloadWithPort(t, 8300) -} - -func getTestWorkloadWithPort(t *testing.T, port int) *pbresource.Resource { - workload := &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{consulPortNameServer}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - consulPortNameServer: { - Port: uint32(port), - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - }, - }, - } - data, err := anypb.New(workload) - require.NoError(t, err) - - return &pbresource.Resource{ - Id: getTestWorkloadId(), - Data: data, - Metadata: map[string]string{ - "read_replica": "false", - "raft_version": "3", - "serf_protocol_current": "2", - "serf_protocol_min": "1", - "serf_protocol_max": "5", - "version": "1.18.0", - "grpc_port": "8502", - }, - } -} - -func getTestWorkloadId() *pbresource.ID { - return &pbresource.ID{ - Tenancy: resource.DefaultNamespacedTenancy(), - Type: pbcatalog.WorkloadType, - Name: "consul-server-72af047d-1857-2493-969e-53614a70b25a", - } -} - -func getTestHealthStatus(t *testing.T, passing bool) *pbresource.Resource { - healthStatus := &pbcatalog.HealthStatus{ - Type: string(structs.SerfCheckID), - Description: structs.SerfCheckName, - } - - if passing { - healthStatus.Status = pbcatalog.Health_HEALTH_PASSING - healthStatus.Output = structs.SerfCheckAliveOutput - } else { - healthStatus.Status = pbcatalog.Health_HEALTH_CRITICAL - healthStatus.Output = structs.SerfCheckFailedOutput - } - - data, err := anypb.New(healthStatus) - require.NoError(t, err) - - return &pbresource.Resource{ - Id: 
getTestHealthstatusId(), - Data: data, - Owner: getTestWorkloadId(), - } -} - -func getTestHealthstatusId() *pbresource.ID { - return &pbresource.ID{ - Tenancy: resource.DefaultNamespacedTenancy(), - Type: pbcatalog.HealthStatusType, - Name: "consul-server-72af047d-1857-2493-969e-53614a70b25a", - } -} - -func getTestSerfMember(status serf.MemberStatus) serf.Member { - return serf.Member{ - Name: "test-server-1", - Addr: net.ParseIP("127.0.0.1"), - Port: 8300, - // representative tags from a local dev deployment of ENT - Tags: map[string]string{ - "vsn_min": "2", - "vsn": "2", - "acls": "1", - "ft_si": "1", - "raft_vsn": "3", - "grpc_port": "8502", - "wan_join_port": "8500", - "dc": "dc1", - "segment": "", - "id": "72af047d-1857-2493-969e-53614a70b25a", - "ft_admpart": "1", - "role": "consul", - "build": "1.18.0", - "ft_ns": "1", - "vsn_max": "3", - "bootstrap": "1", - "expect": "1", - "port": "8300", - }, - Status: status, - ProtocolMin: 1, - ProtocolMax: 5, - ProtocolCur: 2, - DelegateMin: 2, - DelegateMax: 5, - DelegateCur: 4, - } -} - -// Test_ResourceCmpOptions_GeneratedFieldInsensitive makes sure our protocmp options are working as expected. -func Test_ResourceCmpOptions_GeneratedFieldInsensitive(t *testing.T) { - t.Parallel() - - res1 := getTestWorkload(t) - res2 := getTestWorkload(t) - - // Modify the generated fields - res2.Id.Uid = "123456" - res2.Version = "789" - res2.Generation = "millenial" - res2.Status = map[string]*pbresource.Status{ - "foo": {ObservedGeneration: "124"}, - } - - require.True(t, cmp.Equal(res1, res2, resourceCmpOptions...)) - - res1.Metadata["foo"] = "bar" - - require.False(t, cmp.Equal(res1, res2, resourceCmpOptions...)) -} - -// Test gRPC Error Codes Conditions -func Test_grpcNotFoundErr(t *testing.T) { - t.Parallel() - tests := []struct { - name string - err error - expected bool - }{ - { - name: "Nil Error", - }, - { - name: "Nonsense Error", - err: fmt.Errorf("boooooo!"), - }, - { - name: "gRPC Permission Denied Error", - err: status.Error(codes.PermissionDenied, "permission denied is not NotFound"), - }, - { - name: "gRPC NotFound Error", - err: status.Error(codes.NotFound, "bingo: not found"), - expected: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, grpcNotFoundErr(tt.err)) - }) - } -} diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 619d6ae6dae1a..5834f26f2b192 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -10,6 +10,7 @@ import ( "fmt" "io" "os" + "strconv" "strings" "testing" "time" @@ -23,75 +24,880 @@ import ( msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/structs" tokenStore "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" ) -func enableV2(t *testing.T) func(deps *Deps) { - return func(deps *Deps) { - deps.Experiments = []string{"resource-apis"} - m, _ := leafcert.NewTestManager(t, nil) - deps.LeafCertManager = m +func TestLeader_RegisterMember(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + // Try to join + joinLAN(t, c1, s1) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Client should be registered + state := s1.fsm.State() + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatal("client not registered") + } + }) + + // Should have a check + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(checks) != 1 { + t.Fatalf("client missing check") + } + if checks[0].CheckID != structs.SerfCheckID { + t.Fatalf("bad check: %v", checks[0]) + } + if checks[0].Name != structs.SerfCheckName { + t.Fatalf("bad check: %v", checks[0]) + } + if checks[0].Status != api.HealthPassing { + t.Fatalf("bad check: %v", checks[0]) + } + + // Server should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(s1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatalf("server not registered") + } + }) + + // Service should be registered + _, services, err := state.NodeServices(nil, s1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services.Services["consul"]; !ok { + t.Fatalf("consul service not registered: %v", services) + } +} + +func TestLeader_FailedMember(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Try to join + joinLAN(t, c1, s1) + + // Fail the member + c1.Shutdown() + + // Should be registered + state := s1.fsm.State() + retry.Run(t, func(r *retry.R) { + _, node, err := 
state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatal("client not registered") + } + }) + + // Should have a check + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(checks) != 1 { + t.Fatalf("client missing check") + } + if checks[0].CheckID != structs.SerfCheckID { + t.Fatalf("bad check: %v", checks[0]) + } + if checks[0].Name != structs.SerfCheckName { + t.Fatalf("bad check: %v", checks[0]) + } + + retry.Run(t, func(r *retry.R) { + _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if len(checks) != 1 { + r.Fatalf("client missing check") + } + if got, want := checks[0].Status, api.HealthCritical; got != want { + r.Fatalf("got status %q want %q", got, want) + } + }) +} + +func TestLeader_LeftMember(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + // Try to join + joinLAN(t, c1, s1) + + state := s1.fsm.State() + + // Should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + require.NoError(r, err) + require.NotNil(r, node, "client not registered") + }) + + // Node should leave + c1.Leave() + c1.Shutdown() + + // Should be deregistered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + require.NoError(r, err) + require.Nil(r, node, "client still registered") + }) +} + +func TestLeader_ReapMember(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + // Try to join + joinLAN(t, c1, s1) + + state := s1.fsm.State() + + // Should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + require.NoError(r, err) + require.NotNil(r, node, "client not registered") + }) + + // Simulate a node reaping + mems := s1.LANMembersInAgentPartition() + var c1mem serf.Member + for _, m := range mems { + if m.Name == c1.config.NodeName { + c1mem = m + c1mem.Status = StatusReap + break + } + } + s1.reconcileCh <- c1mem + + // Should be deregistered; we have to poll quickly here because + // anti-entropy will put it back. + reaped := false + for start := time.Now(); time.Since(start) < 5*time.Second; { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + require.NoError(t, err) + if node == nil { + reaped = true + break + } + } + if !reaped { + t.Fatalf("client should not be registered") } } -// Test that Consul service is created in V2. 
-// In V1, the service is implicitly created - this is covered in leader_registrator_v1_test.go -func Test_InitConsulService(t *testing.T) { +func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } t.Parallel() - dir, s := testServerWithDepsAndConfig(t, enableV2(t), - func(c *Config) { + run := func(t *testing.T, status serf.MemberStatus, nameFn func(string) string) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) - defer os.RemoveAll(dir) - defer s.Shutdown() + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + nodeName := s1.config.NodeName + if nameFn != nil { + nodeName = nameFn(nodeName) + } + + state := s1.fsm.State() + + // Should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(nodeName, nil, "") + require.NoError(r, err) + require.NotNil(r, node, "server not registered") + }) + + // Simulate THIS node reaping or leaving + mems := s1.LANMembersInAgentPartition() + var s1mem serf.Member + for _, m := range mems { + if strings.EqualFold(m.Name, nodeName) { + s1mem = m + s1mem.Status = status + s1mem.Name = nodeName + break + } + } + s1.reconcileCh <- s1mem + + // Should NOT be deregistered; we have to poll quickly here because + // anti-entropy will put it back if it did get deleted. + reaped := false + for start := time.Now(); time.Since(start) < 5*time.Second; { + _, node, err := state.GetNode(nodeName, nil, "") + require.NoError(t, err) + if node == nil { + reaped = true + break + } + } + if reaped { + t.Fatalf("server should still be registered") + } + } + + t.Run("original name", func(t *testing.T) { + t.Parallel() + t.Run("left", func(t *testing.T) { + run(t, serf.StatusLeft, nil) + }) + t.Run("reap", func(t *testing.T) { + run(t, StatusReap, nil) + }) + }) + + t.Run("uppercased name", func(t *testing.T) { + t.Parallel() + t.Run("left", func(t *testing.T) { + run(t, serf.StatusLeft, strings.ToUpper) + }) + t.Run("reap", func(t *testing.T) { + run(t, StatusReap, strings.ToUpper) + }) + }) +} + +func TestLeader_CheckServersMeta(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + t.Parallel() + + ports := freeport.GetN(t, 2) // s3 grpc, s3 grpc_tls + + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = true + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, s2 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = false + }) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = false + c.GRPCPort = ports[0] + c.GRPCTLSPort = ports[1] + }) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + + // Try to join + joinLAN(t, s1, s2) + joinLAN(t, s1, s3) - testrpc.WaitForRaftLeader(t, s.RPC, "dc1", testrpc.WithToken("root")) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s2.RPC, "dc1") + testrpc.WaitForLeader(t, s3.RPC, "dc1") + state := s1.fsm.State() - 
client := pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan) + consulService := &structs.NodeService{ + ID: "consul", + Service: "consul", + } + // s3 should be registered + retry.Run(t, func(r *retry.R) { + _, service, err := state.NodeService(nil, s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if service == nil { + r.Fatal("client not registered") + } + if service.Meta["non_voter"] != "false" { + r.Fatalf("Expected to be non_voter == false, was: %s", service.Meta["non_voter"]) + } + }) - consulServiceID := &pbresource.ID{ - Name: structs.ConsulServiceName, - Type: pbcatalog.ServiceType, - Tenancy: resource.DefaultNamespacedTenancy(), + member := serf.Member{} + for _, m := range s1.serfLAN.Members() { + if m.Name == s3.config.NodeName { + member = m + member.Tags = make(map[string]string) + for key, value := range m.Tags { + member.Tags[key] = value + } + } } + if member.Name != s3.config.NodeName { + t.Fatal("could not find node in serf members") + } + versionToExpect := "19.7.9" retry.Run(t, func(r *retry.R) { - res, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: consulServiceID}) + // DEPRECATED - remove nonvoter tag in favor of read_replica in a future version of consul + member.Tags["nonvoter"] = "1" + member.Tags["read_replica"] = "1" + member.Tags["build"] = versionToExpect + err := s1.handleAliveMember(member, nil) + if err != nil { + r.Fatalf("Unexpected error :%v", err) + } + _, service, err := state.NodeService(nil, s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") if err != nil { r.Fatalf("err: %v", err) } - data := res.GetResource().GetData() - require.NotNil(r, data) + if service == nil { + r.Fatal("client not registered") + } + // DEPRECATED - remove non_voter in favor of read_replica in a future version of consul + if service.Meta["non_voter"] != "true" { + r.Fatalf("Expected to be non_voter == true, was: %s", service.Meta["non_voter"]) + } + if service.Meta["read_replica"] != "true" { + r.Fatalf("Expected to be read_replica == true, was: %s", service.Meta["non_voter"]) + } + newVersion := service.Meta["version"] + if newVersion != versionToExpect { + r.Fatalf("Expected version to be updated to %s, was %s", versionToExpect, newVersion) + } + grpcPort := service.Meta["grpc_port"] + if grpcPort != strconv.Itoa(ports[0]) { + r.Fatalf("Expected grpc port to be %d, was %s", ports[0], grpcPort) + } + grpcTLSPort := service.Meta["grpc_tls_port"] + if grpcTLSPort != strconv.Itoa(ports[1]) { + r.Fatalf("Expected grpc tls port to be %d, was %s", ports[1], grpcTLSPort) + } + }) +} - var service pbcatalog.Service - err = data.UnmarshalTo(&service) - require.NoError(r, err) +func TestLeader_ReapServer(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = true + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, s2 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = false + }) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + 
c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "allow" + c.Bootstrap = false + }) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + + // Try to join + joinLAN(t, s1, s2) + joinLAN(t, s1, s3) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s2.RPC, "dc1") + testrpc.WaitForLeader(t, s3.RPC, "dc1") + state := s1.fsm.State() + + // s3 should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(s3.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatal("client not registered") + } + }) + + // call reconcileReaped with a map that does not contain s3 + knownMembers := make(map[string]struct{}) + knownMembers[s1.config.NodeName] = struct{}{} + knownMembers[s2.config.NodeName] = struct{}{} + + err := s1.reconcileReaped(knownMembers, nil) + + if err != nil { + t.Fatalf("Unexpected error :%v", err) + } + // s3 should be deregistered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(s3.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node != nil { + r.Fatalf("server with id %v should not be registered", s3.config.NodeID) + } + }) + +} + +func TestLeader_Reconcile_ReapMember(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Register a non-existing member + dead := structs.RegisterRequest{ + Datacenter: s1.config.Datacenter, + Node: "no-longer-around", + Address: "127.1.1.1", + Check: &structs.HealthCheck{ + Node: "no-longer-around", + CheckID: structs.SerfCheckID, + Name: structs.SerfCheckName, + Status: api.HealthCritical, + }, + WriteRequest: structs.WriteRequest{ + Token: "root", + }, + } + var out struct{} + if err := s1.RPC(context.Background(), "Catalog.Register", &dead, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Force a reconciliation + if err := s1.reconcile(); err != nil { + t.Fatalf("err: %v", err) + } + + // Node should be gone + state := s1.fsm.State() + _, node, err := state.GetNode("no-longer-around", nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if node != nil { + t.Fatalf("client registered") + } +} + +func TestLeader_Reconcile(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + // Join before we have a leader, this should cause a reconcile! 
+ joinLAN(t, c1, s1) + + // Should not be registered + state := s1.fsm.State() + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if node != nil { + t.Fatalf("client registered") + } + + // Should be registered + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatal("client not registered") + } + }) +} + +func TestLeader_Reconcile_Races(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + dir2, c1 := testClient(t) + defer os.RemoveAll(dir2) + defer c1.Shutdown() + + joinLAN(t, c1, s1) + + // Wait for the server to reconcile the client and register it. + state := s1.fsm.State() + var nodeAddr string + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node == nil { + r.Fatal("client not registered") + } + nodeAddr = node.Address + }) + + // Add in some metadata via the catalog (as if the agent synced it + // there). We also set the serfHealth check to failing so the reconcile + // will attempt to flip it back + req := structs.RegisterRequest{ + Datacenter: s1.config.Datacenter, + Node: c1.config.NodeName, + ID: c1.config.NodeID, + Address: nodeAddr, + NodeMeta: map[string]string{"hello": "world"}, + Check: &structs.HealthCheck{ + Node: c1.config.NodeName, + CheckID: structs.SerfCheckID, + Name: structs.SerfCheckName, + Status: api.HealthCritical, + Output: "", + }, + } + var out struct{} + if err := s1.RPC(context.Background(), "Catalog.Register", &req, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Force a reconcile and make sure the metadata stuck around. + if err := s1.reconcile(); err != nil { + t.Fatalf("err: %v", err) + } + _, node, err := state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if node == nil { + t.Fatalf("bad") + } + if hello, ok := node.Meta["hello"]; !ok || hello != "world" { + t.Fatalf("bad") + } + + // Fail the member and wait for the health to go critical. + c1.Shutdown() + retry.Run(t, func(r *retry.R) { + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if len(checks) != 1 { + r.Fatalf("client missing check") + } + if got, want := checks[0].Status, api.HealthCritical; got != want { + r.Fatalf("got state %q want %q", got, want) + } + }) + + // Make sure the metadata didn't get clobbered. + _, node, err = state.GetNode(c1.config.NodeName, nil, "") + if err != nil { + t.Fatalf("err: %v", err) + } + if node == nil { + t.Fatalf("bad") + } + if hello, ok := node.Meta["hello"]; !ok || hello != "world" { + t.Fatalf("bad") + } +} + +func TestLeader_LeftServer(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, s2 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + + // Put s1 last so we don't trigger a leader election. 
+ servers := []*Server{s2, s3, s1} + + // Try to join + joinLAN(t, s2, s1) + joinLAN(t, s3, s1) + for _, s := range servers { + retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) }) + } + + // Kill any server + servers[0].Shutdown() + + // Force remove the non-leader (transition to left state) + if err := servers[1].RemoveFailedNode(servers[0].config.NodeName, false, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait until the remaining servers show only 2 peers. + for _, s := range servers[1:] { + retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) }) + } + s1.Shutdown() +} + +func TestLeader_LeftLeader(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() - // Spot check the Service - require.Equal(r, service.GetWorkloads().GetPrefixes(), []string{consulWorkloadPrefix}) - require.GreaterOrEqual(r, len(service.GetPorts()), 1) + dir2, s2 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + servers := []*Server{s1, s2, s3} + + // Try to join + joinLAN(t, s2, s1) + joinLAN(t, s3, s1) + + for _, s := range servers { + retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) }) + } + + // Kill the leader! + var leader *Server + for _, s := range servers { + if s.IsLeader() { + leader = s + break + } + } + if leader == nil { + t.Fatalf("Should have a leader") + } + if !leader.isReadyForConsistentReads() { + t.Fatalf("Expected leader to be ready for consistent reads ") + } + leader.Leave() + if leader.isReadyForConsistentReads() { + t.Fatalf("Expected consistent read state to be false ") + } + leader.Shutdown() + time.Sleep(100 * time.Millisecond) + + var remain *Server + for _, s := range servers { + if s == leader { + continue + } + remain = s + retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) }) + } - //Since we're not running a full agent w/ serf, we can't check for valid endpoints + // Verify the old leader is deregistered + state := remain.fsm.State() + retry.Run(t, func(r *retry.R) { + _, node, err := state.GetNode(leader.config.NodeName, nil, "") + if err != nil { + r.Fatalf("err: %v", err) + } + if node != nil { + r.Fatal("leader should be deregistered") + } }) } +func TestLeader_MultiBootstrap(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, s2 := testServer(t) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + servers := []*Server{s1, s2} + + // Try to join + joinLAN(t, s2, s1) + + for _, s := range servers { + retry.Run(t, func(r *retry.R) { + if got, want := len(s.serfLAN.Members()), 2; got != want { + r.Fatalf("got %d peers want %d", got, want) + } + }) + } + + // Ensure we don't have multiple raft peers + for _, s := range servers { + peers, _ := s.autopilot.NumVoters() + if peers != 1 { + t.Fatalf("should only have 1 raft peer!") + } + } +} + func TestLeader_TombstoneGC_Reset(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -834,7 +1640,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) { deps := newDefaultDeps(t, config) deps.Logger = logger - srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger, nil) + srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger) require.NoError(t, err) defer srv.Shutdown() @@ -1279,8 +2085,8 @@ func 
TestDatacenterSupportsIntentionsAsConfigEntries(t *testing.T) { LegacyUpdateTime: got.Sources[0].LegacyUpdateTime, }, }, - RaftIndex: got.RaftIndex, + Hash: got.GetHash(), } got.Hash = 0 require.Equal(t, expect, got) diff --git a/agent/consul/logging.go b/agent/consul/logging.go index cfafe62b0d31f..da07daa8febfa 100644 --- a/agent/consul/logging.go +++ b/agent/consul/logging.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/logging_test.go b/agent/consul/logging_test.go index 3b756c0bb6226..7f090992a7a26 100644 --- a/agent/consul/logging_test.go +++ b/agent/consul/logging_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/merge.go b/agent/consul/merge.go index f6771d110f0ce..21b59f1aa92d6 100644 --- a/agent/consul/merge.go +++ b/agent/consul/merge.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/merge_ce.go b/agent/consul/merge_ce.go index c5d8096e708a7..59704f6533203 100644 --- a/agent/consul/merge_ce.go +++ b/agent/consul/merge_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/merge_ce_test.go b/agent/consul/merge_ce_test.go index e12ea660f72fa..8b0a7514ab264 100644 --- a/agent/consul/merge_ce_test.go +++ b/agent/consul/merge_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/merge_test.go b/agent/consul/merge_test.go index bc9d1f2cb4da8..f5f5c6d88ff1c 100644 --- a/agent/consul/merge_test.go +++ b/agent/consul/merge_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/multilimiter/multilimiter.go b/agent/consul/multilimiter/multilimiter.go index a0b9a6044f0df..f40e6c501abe6 100644 --- a/agent/consul/multilimiter/multilimiter.go +++ b/agent/consul/multilimiter/multilimiter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package multilimiter diff --git a/agent/consul/multilimiter/multilimiter_test.go b/agent/consul/multilimiter/multilimiter_test.go index e7cdab4e14824..b649bdb6c9c95 100644 --- a/agent/consul/multilimiter/multilimiter_test.go +++ b/agent/consul/multilimiter/multilimiter_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package multilimiter diff --git a/agent/consul/operator_autopilot_endpoint.go b/agent/consul/operator_autopilot_endpoint.go index 39bd5b648ddb8..b6ef7d38e6565 100644 --- a/agent/consul/operator_autopilot_endpoint.go +++ b/agent/consul/operator_autopilot_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_autopilot_endpoint_test.go b/agent/consul/operator_autopilot_endpoint_test.go index 4cef3f0960d42..c9258e9aa2705 100644 --- a/agent/consul/operator_autopilot_endpoint_test.go +++ b/agent/consul/operator_autopilot_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_backend.go b/agent/consul/operator_backend.go index 136baa1a22ab1..a72128735ab3c 100644 --- a/agent/consul/operator_backend.go +++ b/agent/consul/operator_backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_backend_test.go b/agent/consul/operator_backend_test.go index 6127868b9f000..2189fe00630c7 100644 --- a/agent/consul/operator_backend_test.go +++ b/agent/consul/operator_backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/consul/acl" external "github.com/hashicorp/consul/agent/grpc-external" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib/testhelpers" "github.com/hashicorp/consul/proto/private/pboperator" "github.com/hashicorp/consul/sdk/testutil/retry" "google.golang.org/grpc/credentials/insecure" @@ -26,8 +25,6 @@ import ( func TestOperatorBackend_TransferLeader(t *testing.T) { t.Parallel() - testhelpers.SkipFlake(t) - conf := testClusterConfig{ Datacenter: "dc1", Servers: 3, diff --git a/agent/consul/operator_endpoint.go b/agent/consul/operator_endpoint.go index 67259c9408a95..33e73e6ee1df9 100644 --- a/agent/consul/operator_endpoint.go +++ b/agent/consul/operator_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_raft_endpoint.go b/agent/consul/operator_raft_endpoint.go index b8a16fc2c3ec9..7b0bcbc5cc035 100644 --- a/agent/consul/operator_raft_endpoint.go +++ b/agent/consul/operator_raft_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_raft_endpoint_test.go b/agent/consul/operator_raft_endpoint_test.go index bb2dc88fc89e3..7242c40e6c45a 100644 --- a/agent/consul/operator_raft_endpoint_test.go +++ b/agent/consul/operator_raft_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/operator_usage_endpoint.go b/agent/consul/operator_usage_endpoint.go index 68f3137d0a61f..d23815b147c7d 100644 --- a/agent/consul/operator_usage_endpoint.go +++ b/agent/consul/operator_usage_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/options.go b/agent/consul/options.go index ad4f7eb4d9ba7..26cb2471a89bc 100644 --- a/agent/consul/options.go +++ b/agent/consul/options.go @@ -1,30 +1,25 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul import ( "google.golang.org/grpc" - "github.com/hashicorp/consul/lib/stringslice" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/grpc-external/limiter" "github.com/hashicorp/consul/agent/hcp" - "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/tlsutil" ) type Deps struct { - LeafCertManager *leafcert.Manager EventPublisher *stream.EventPublisher Logger hclog.InterceptLogger TLSConfigurator *tlsutil.Configurator @@ -34,7 +29,6 @@ type Deps struct { GRPCConnPool GRPCClientConner LeaderForwarder LeaderForwarder XDSStreamLimiter *limiter.SessionLimiter - Registry resource.Registry // GetNetRPCInterceptorFunc, if not nil, sets the net/rpc rpc.ServerServiceCallInterceptor on // the server side to record metrics around the RPC requests. If nil, no interceptor is added to // the rpc server. @@ -50,42 +44,6 @@ type Deps struct { EnterpriseDeps } -// UseV2DNS returns true if "v2-dns" is present in the Experiments -// array of the agent config. It is assumed if the v2 resource APIs are enabled. -func (d Deps) UseV2DNS() bool { - if stringslice.Contains(d.Experiments, V2DNSExperimentName) || d.UseV2Resources() { - return true - } - return false -} - -// UseV2Resources returns true if "resource-apis" is present in the Experiments -// array of the agent config. -func (d Deps) UseV2Resources() bool { - if stringslice.Contains(d.Experiments, CatalogResourceExperimentName) { - return true - } - return false -} - -// UseV2Tenancy returns true if "v2tenancy" is present in the Experiments -// array of the agent config. -func (d Deps) UseV2Tenancy() bool { - if stringslice.Contains(d.Experiments, V2TenancyExperimentName) { - return true - } - return false -} - -// HCPAllowV2Resources returns true if "hcp-v2-resource-apis" is present in the Experiments -// array of the agent config. -func (d Deps) HCPAllowV2Resources() bool { - if stringslice.Contains(d.Experiments, HCPAllowV2ResourceAPIs) { - return true - } - return false -} - type GRPCClientConner interface { ClientConn(datacenter string) (*grpc.ClientConn, error) ClientConnLeader() (*grpc.ClientConn, error) diff --git a/agent/consul/options_ce.go b/agent/consul/options_ce.go index dbc8dd15ca7ee..7604ddd87f925 100644 --- a/agent/consul/options_ce.go +++ b/agent/consul/options_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/peering_backend.go b/agent/consul/peering_backend.go index 5a27bc6442a1d..1771be10fbaac 100644 --- a/agent/consul/peering_backend.go +++ b/agent/consul/peering_backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/peering_backend_ce.go b/agent/consul/peering_backend_ce.go index 22b70886ab4ae..81a133b3425a0 100644 --- a/agent/consul/peering_backend_ce.go +++ b/agent/consul/peering_backend_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
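The options.go hunk above removes the Deps helpers (UseV2DNS, UseV2Resources, UseV2Tenancy, HCPAllowV2Resources) that gated v2 behavior on names in the agent's Experiments list, while the same stringslice.Contains check survives elsewhere in this patch (registerResources gates catalog and mesh type registration on "resource-apis"). Below is a minimal sketch of that gating pattern; the deps type, the helper name, and the inlined contains function are illustrative stand-ins, not code from this patch.

    package main

    import "fmt"

    // contains mirrors the behavior of lib/stringslice.Contains as used in the
    // hunks above: a linear scan over the configured experiment names.
    func contains(xs []string, want string) bool {
        for _, x := range xs {
            if x == want {
                return true
            }
        }
        return false
    }

    // deps is an illustrative stand-in for the Deps struct in options.go; only
    // the Experiments field matters for the gating pattern.
    type deps struct {
        Experiments []string
    }

    // useResourceAPIs has the shape of the helpers removed above: a boolean
    // derived purely from the presence of an experiment name.
    func (d deps) useResourceAPIs() bool {
        return contains(d.Experiments, "resource-apis")
    }

    func main() {
        d := deps{Experiments: []string{"resource-apis"}}
        fmt.Println(d.useResourceAPIs()) // true
    }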
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/peering_backend_ce_test.go b/agent/consul/peering_backend_ce_test.go index d8c476925969c..410bf5f234273 100644 --- a/agent/consul/peering_backend_ce_test.go +++ b/agent/consul/peering_backend_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/peering_backend_test.go b/agent/consul/peering_backend_test.go index adfe6fe228f95..648052b7a15fe 100644 --- a/agent/consul/peering_backend_test.go +++ b/agent/consul/peering_backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/prepared_query/template.go b/agent/consul/prepared_query/template.go index 03cf9d2f58505..ef2e2abd4cae4 100644 --- a/agent/consul/prepared_query/template.go +++ b/agent/consul/prepared_query/template.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package prepared_query diff --git a/agent/consul/prepared_query/template_test.go b/agent/consul/prepared_query/template_test.go index 7c9b2f1a3af29..d4f78402140de 100644 --- a/agent/consul/prepared_query/template_test.go +++ b/agent/consul/prepared_query/template_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package prepared_query diff --git a/agent/consul/prepared_query/walk.go b/agent/consul/prepared_query/walk.go index 72296c0c7fe64..da53ce105b5f3 100644 --- a/agent/consul/prepared_query/walk.go +++ b/agent/consul/prepared_query/walk.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package prepared_query diff --git a/agent/consul/prepared_query/walk_ce_test.go b/agent/consul/prepared_query/walk_ce_test.go index 00f3edb10de11..dec84b241b0a3 100644 --- a/agent/consul/prepared_query/walk_ce_test.go +++ b/agent/consul/prepared_query/walk_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package prepared_query diff --git a/agent/consul/prepared_query/walk_test.go b/agent/consul/prepared_query/walk_test.go index b788571e4af5f..9ff380248bd2b 100644 --- a/agent/consul/prepared_query/walk_test.go +++ b/agent/consul/prepared_query/walk_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package prepared_query diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 139556bb1af8c..101839708ea2f 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/prepared_query_endpoint_ce.go b/agent/consul/prepared_query_endpoint_ce.go index 3498b7063d3e4..612b81b2e6880 100644 --- a/agent/consul/prepared_query_endpoint_ce.go +++ b/agent/consul/prepared_query_endpoint_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/prepared_query_endpoint_ce_test.go b/agent/consul/prepared_query_endpoint_ce_test.go index d090fa1d84046..876f91d42cd5d 100644 --- a/agent/consul/prepared_query_endpoint_ce_test.go +++ b/agent/consul/prepared_query_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index 2761df4093faa..f7db03f7faba6 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -2808,7 +2808,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) { t.Fatalf("bad: %v", ret) } // Since we have no idea when the joinWAN operation completes - // we keep on querying until the join operation completes. + // we keep on querying until the the join operation completes. retry.Run(t, func(r *retry.R) { r.Check(s1.forwardDC("Status.Ping", "dc2", &struct{}{}, &struct{}{})) }) diff --git a/agent/consul/raft_handle.go b/agent/consul/raft_handle.go index 2906fe7115f19..bf38f0ee9e767 100644 --- a/agent/consul/raft_handle.go +++ b/agent/consul/raft_handle.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/raft_rpc.go b/agent/consul/raft_rpc.go index 1a0d6caa64d4a..7928ad31e2b9a 100644 --- a/agent/consul/raft_rpc.go +++ b/agent/consul/raft_rpc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/rate/handler.go b/agent/consul/rate/handler.go index bb3aef63931dd..c18ec85eddc8d 100644 --- a/agent/consul/rate/handler.go +++ b/agent/consul/rate/handler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package rate implements server-side RPC rate limiting. package rate diff --git a/agent/consul/rate/handler_ce.go b/agent/consul/rate/handler_ce.go index b0a7beb7c18a0..fc33a69487f88 100644 --- a/agent/consul/rate/handler_ce.go +++ b/agent/consul/rate/handler_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package rate diff --git a/agent/consul/rate/handler_test.go b/agent/consul/rate/handler_test.go index 268568ce9599c..54a8b86a4b989 100644 --- a/agent/consul/rate/handler_test.go +++ b/agent/consul/rate/handler_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package rate diff --git a/agent/consul/rate/metrics.go b/agent/consul/rate/metrics.go index ac69c14661776..cbf796fa935c6 100644 --- a/agent/consul/rate/metrics.go +++ b/agent/consul/rate/metrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package rate diff --git a/agent/consul/replication.go b/agent/consul/replication.go index 08b8811129bea..0d85d082653cb 100644 --- a/agent/consul/replication.go +++ b/agent/consul/replication.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/replication_test.go b/agent/consul/replication_test.go index 27000fc563b78..e37e19b1f2933 100644 --- a/agent/consul/replication_test.go +++ b/agent/consul/replication_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/reporting/reporting.go b/agent/consul/reporting/reporting.go index d6c480f6bace4..fec7050f695ba 100644 --- a/agent/consul/reporting/reporting.go +++ b/agent/consul/reporting/reporting.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package reporting diff --git a/agent/consul/reporting/reporting_ce.go b/agent/consul/reporting/reporting_ce.go index b9eb978b8c026..a1e95a177416c 100644 --- a/agent/consul/reporting/reporting_ce.go +++ b/agent/consul/reporting/reporting_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package reporting diff --git a/agent/consul/rpc.go b/agent/consul/rpc.go index 8c5ad14efffd6..f97a5ff886746 100644 --- a/agent/consul/rpc.go +++ b/agent/consul/rpc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -244,7 +244,7 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) { s.handleInsecureConn(conn) case pool.RPCGRPC: - s.internalGRPCHandler.Handle(conn) + s.grpcHandler.Handle(conn) case pool.RPCRaftForwarding: s.handleRaftForwarding(conn) @@ -315,7 +315,7 @@ func (s *Server) handleNativeTLS(conn net.Conn) { s.handleSnapshotConn(tlsConn) case pool.ALPN_RPCGRPC: - s.internalGRPCHandler.Handle(tlsConn) + s.grpcHandler.Handle(tlsConn) case pool.ALPN_RPCRaftForwarding: s.handleRaftForwarding(tlsConn) diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index 39351c98ca92a..f1b05fa528287 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/rtt.go b/agent/consul/rtt.go index 1599301e158df..5db0a634b4352 100644 --- a/agent/consul/rtt.go +++ b/agent/consul/rtt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/rtt_test.go b/agent/consul/rtt_test.go index aeed0b66f50d5..9420f36c83657 100644 --- a/agent/consul/rtt_test.go +++ b/agent/consul/rtt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/segment_ce.go b/agent/consul/segment_ce.go index 25e941ced0056..a3c0162d2a1e2 100644 --- a/agent/consul/segment_ce.go +++ b/agent/consul/segment_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/serf_filter.go b/agent/consul/serf_filter.go index 7b09c2b9e8020..fd6911bf0ab8a 100644 --- a/agent/consul/serf_filter.go +++ b/agent/consul/serf_filter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/serf_test.go b/agent/consul/serf_test.go index 4d4bc4926a46d..62cc6d0a0ba3f 100644 --- a/agent/consul/serf_test.go +++ b/agent/consul/serf_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server.go b/agent/consul/server.go index d3211dc264821..7df10e5b5c82e 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -20,12 +20,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/fullstorydev/grpchan/inprocgrpc" - "go.etcd.io/bbolt" - "golang.org/x/time/rate" - "google.golang.org/grpc" - - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -36,8 +30,15 @@ import ( walmetrics "github.com/hashicorp/raft-wal/metrics" "github.com/hashicorp/raft-wal/verifier" "github.com/hashicorp/serf/serf" + "go.etcd.io/bbolt" + "golang.org/x/time/rate" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/blockingquery" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/authmethod" @@ -51,37 +52,38 @@ import ( "github.com/hashicorp/consul/agent/consul/usagemetrics" "github.com/hashicorp/consul/agent/consul/wanfed" "github.com/hashicorp/consul/agent/consul/xdscapacity" + aclgrpc "github.com/hashicorp/consul/agent/grpc-external/services/acl" + "github.com/hashicorp/consul/agent/grpc-external/services/connectca" + "github.com/hashicorp/consul/agent/grpc-external/services/dataplane" "github.com/hashicorp/consul/agent/grpc-external/services/peerstream" + resourcegrpc "github.com/hashicorp/consul/agent/grpc-external/services/resource" + "github.com/hashicorp/consul/agent/grpc-external/services/serverdiscovery" + agentgrpc "github.com/hashicorp/consul/agent/grpc-internal" + "github.com/hashicorp/consul/agent/grpc-internal/services/subscribe" "github.com/hashicorp/consul/agent/hcp" - "github.com/hashicorp/consul/agent/hcp/bootstrap" hcpclient "github.com/hashicorp/consul/agent/hcp/client" logdrop "github.com/hashicorp/consul/agent/log-drop" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" "github.com/hashicorp/consul/agent/rpc/middleware" + "github.com/hashicorp/consul/agent/rpc/operator" "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/internal/auth" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - hcpctl "github.com/hashicorp/consul/internal/hcp" "github.com/hashicorp/consul/internal/mesh" - proxysnapshot 
"github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - "github.com/hashicorp/consul/internal/multicluster" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/resource/reaper" - "github.com/hashicorp/consul/internal/storage" raftstorage "github.com/hashicorp/consul/internal/storage/raft" - "github.com/hashicorp/consul/internal/tenancy" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/routine" "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate" "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/pbsubscribe" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" cslversion "github.com/hashicorp/consul/version" @@ -130,25 +132,10 @@ const ( // and wait for a periodic reconcile. reconcileChSize = 256 - LeaderTransferMinVersion = "1.6.0" - CatalogResourceExperimentName = "resource-apis" - V2DNSExperimentName = "v2dns" - V2TenancyExperimentName = "v2tenancy" - HCPAllowV2ResourceAPIs = "hcp-v2-resource-apis" -) + LeaderTransferMinVersion = "1.6.0" -// IsExperimentAllowedOnSecondaries returns true if an experiment is currently -// disallowed for wan federated secondary datacenters. -// -// Likely these will all be short lived exclusions. -func IsExperimentAllowedOnSecondaries(name string) bool { - switch name { - case CatalogResourceExperimentName, V2DNSExperimentName, V2TenancyExperimentName: - return false - default: - return true - } -} + catalogResourceExperimentName = "resource-apis" +) const ( aclPolicyReplicationRoutineName = "ACL policy replication" @@ -256,7 +243,7 @@ type Server struct { // serf cluster that spans datacenters eventChWAN chan serf.Event - // wanMembershipNotifyCh is used to receive notifications that the + // wanMembershipNotifyCh is used to receive notifications that the the // serfWAN wan pool may have changed. // // If this is nil, notification is skipped. @@ -286,9 +273,6 @@ type Server struct { // raftStorageBackend is the Raft-backed storage backend for resources. raftStorageBackend *raftstorage.Backend - // the currently in use storage backend - storageBackend storage.Backend - // reconcileCh is used to pass events from the serf handler // into the leader manager, so that the strong state can be // updated @@ -304,35 +288,20 @@ type Server struct { // is only ever closed. leaveCh chan struct{} + // externalACLServer serves the ACL service exposed on the external gRPC port. + // It is also exposed on the internal multiplexed "server" port to enable + // RPC forwarding. + externalACLServer *aclgrpc.Server + + // externalConnectCAServer serves the Connect CA service exposed on the external + // gRPC port. It is also exposed on the internal multiplexed "server" port to + // enable RPC forwarding. + externalConnectCAServer *connectca.Server + // externalGRPCServer has a gRPC server exposed on the dedicated gRPC ports, as // opposed to the multiplexed "server" port which is served by grpcHandler. externalGRPCServer *grpc.Server - // insecureUnsafeGRPCChan is used to access gRPC services on the server without going - // through protobuf serialization/deserialization, performing any network IO or requiring - // authorization. This may be passed as the gRPC client conn to any standard gRPC client - // constructor instead of a standard network protocol based client conn. 
Using this as the - // client conn will cut down on CPU and memory usage for doing in-process gRPC but comes - // with the drawbacks that any data sent over this interface is inherently shared - // and both ends must cooperate with regards to the immutability. Therefore, in - // most cases the insecureSafeGRPCChannel should be used instead which will clone the protobuf - // types as they pass through. - insecureUnsafeGRPCChan *inprocgrpc.Channel - - // insecureSafeGRPCChan is used to access gRPC services on the server without going - // through the standard protobuf serialization/deserialization, performing network - // io or requiring authorization. This gRPC client conn implementation will still - // clone protobuf messages as they pass through and so the client and server - // implementations do not need to coordinate with regards to data immutability. - insecureSafeGRPCChan *inprocgrpc.Channel - - // secureSafeGRPCChan is used to access gRPC services on the server without going - // through the standard protobuf serialization/deserialization or performing network - // io. This gRPC client conn implementation will still clone protobuf messages as - // they pass through and so the client and server implementations do not need - // to coordinate with regards to data immutability. - secureSafeGRPCChan *inprocgrpc.Channel - // router is used to map out Consul servers in the WAN and in Consul // Enterprise user-defined areas. router *router.Router @@ -345,9 +314,9 @@ type Server struct { rpcConnLimiter connlimit.Limiter // Listener is used to listen for incoming connections - Listener net.Listener - internalGRPCHandler connHandler - rpcServer *rpc.Server + Listener net.Listener + grpcHandler connHandler + rpcServer *rpc.Server // incomingRPCLimiter rate-limits incoming net/rpc and gRPC calls. incomingRPCLimiter rpcRate.RequestLimitsHandler @@ -438,9 +407,6 @@ type Server struct { // Manager to handle starting/stopping go routines when establishing/revoking raft leadership leaderRoutineManager *routine.Manager - // registrator is an implemenation that translates serf events of Consul servers into catalog events - registrator ConsulRegistrator - // publisher is the EventPublisher to be shared amongst various server components. Events from // modifications to the FSM, autopilot and others will flow through here. If in the future we // need Events generated outside of the Server and all its components, then we could move @@ -450,39 +416,43 @@ type Server struct { // peeringBackend is shared between the external and internal gRPC services for peering peeringBackend *PeeringBackend + // operatorBackend is shared between the external and internal gRPC services for peering + operatorBackend *OperatorBackend + // peerStreamServer is a server used to handle peering streams from external clusters. peerStreamServer *peerstream.Server + // peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens. + peeringServer *peering.Server + // xdsCapacityController controls the number of concurrent xDS streams the // server is able to handle. 
xdsCapacityController *xdscapacity.Controller // hcpManager handles pushing server status updates to the HashiCorp Cloud Platform when enabled - hcpManager *hcp.HCPManager + hcpManager *hcp.Manager // embedded struct to hold all the enterprise specific data EnterpriseServer + operatorServer *operator.Server // routineManager is responsible for managing longer running go routines // run by the Server routineManager *routine.Manager + // typeRegistry contains Consul's registered resource types. + typeRegistry resource.Registry + + // internalResourceServiceClient is a client that can be used to communicate + // with the Resource Service in-process (i.e. not via the network) without auth. + // It should only be used for purely-internal workloads, such as controllers. + internalResourceServiceClient pbresource.ResourceServiceClient + // controllerManager schedules the execution of controllers. controllerManager *controller.Manager // handles metrics reporting to HashiCorp reportingManager *reporting.ReportingManager - - registry resource.Registry - - useV2Resources bool - - // useV2Tenancy is tied to the "v2tenancy" feature flag. - useV2Tenancy bool - - // whether v2 resources are enabled for use with HCP - // TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP - hcpAllowV2Resources bool } func (s *Server) DecrementBlockingQueries() uint64 { @@ -498,28 +468,14 @@ func (s *Server) IncrementBlockingQueries() uint64 { } type connHandler interface { - RegisterService(*grpc.ServiceDesc, any) Run() error Handle(conn net.Conn) Shutdown() error } -// ProxyUpdater is an interface for ProxyTracker. -type ProxyUpdater interface { - // PushChange allows pushing a computed ProxyState to xds for xds resource generation to send to a proxy. - PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error - - // ProxyConnectedToServer returns whether this id is connected to this server. If it is connected, it also returns - // the token as the first argument. - ProxyConnectedToServer(id *pbresource.ID) (string, bool) - - EventChannel() chan controller.Event -} - // NewServer is used to construct a new Consul server from the configuration // and extra options, potentially returning an error. 
-func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, - incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger, proxyUpdater ProxyUpdater) (*Server, error) { +func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger) (*Server, error) { logger := flat.Logger if err := config.CheckProtocolVersion(); err != nil { return nil, err @@ -527,7 +483,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, if config.DataDir == "" && !config.DevMode { return nil, fmt.Errorf("Config must provide a DataDir") } - if err := config.CheckEnumStrings(); err != nil { + if err := config.CheckACL(); err != nil { return nil, err } @@ -571,10 +527,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, publisher: flat.EventPublisher, incomingRPCLimiter: incomingRPCLimiter, routineManager: routine.NewManager(logger.Named(logging.ConsulServer)), - registry: flat.Registry, - useV2Resources: flat.UseV2Resources(), - useV2Tenancy: flat.UseV2Tenancy(), - hcpAllowV2Resources: flat.HCPAllowV2Resources(), + typeRegistry: resource.NewRegistry(), } incomingRPCLimiter.Register(s) @@ -584,8 +537,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, } go s.raftStorageBackend.Run(&lib.StopChannelContext{StopCh: shutdownCh}) - s.storageBackend = s.raftStorageBackend - s.fsm = fsm.NewFromDeps(fsm.Deps{ Logger: flat.Logger, NewStateStore: func() *state.Store { @@ -596,30 +547,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, }) s.hcpManager = hcp.NewManager(hcp.ManagerConfig{ - CloudConfig: flat.HCP.Config, - StatusFn: s.hcpServerStatus(flat), - Logger: logger.Named("hcp_manager"), - SCADAProvider: flat.HCP.Provider, - TelemetryProvider: flat.HCP.TelemetryProvider, - ManagementTokenUpserterFn: func(name, secretId string) error { - // Check the state of the server before attempting to upsert the token. Otherwise, - // the upsert will fail and log errors that do not require action from the user. - if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() { - // Idea for improvement: Upsert a token with a well-known accessorId here instead - // of a randomly generated one. This would prevent any possible insertion collision between - // this and the insertion that happens during the ACL initialization process (initializeACLs function) - return s.upsertManagementToken(name, secretId) - } - return nil - }, - ManagementTokenDeleterFn: func(secretId string) error { - // Check the state of the server before attempting to delete the token.Otherwise, - // the delete will fail and log errors that do not require action from the user. 
- if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() { - return s.deleteManagementToken(secretId) - } - return nil - }, + Client: flat.HCP.Client, + StatusFn: s.hcpServerStatus(flat), + Logger: logger.Named("hcp_manager"), }) var recorder *middleware.RequestRecorder @@ -633,17 +563,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, } rpcServerOpts := []func(*rpc.Server){ - rpc.WithPreBodyInterceptor( - middleware.ChainedRPCPreBodyInterceptor( - func(reqServiceMethod string, sourceAddr net.Addr) error { - if s.useV2Resources && isV1CatalogRequest(reqServiceMethod) { - return structs.ErrUsingV2CatalogExperiment - } - return nil - }, - middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger)), - ), - ), + rpc.WithPreBodyInterceptor(middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger))), } if flat.GetNetRPCInterceptorFunc != nil { @@ -745,7 +665,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, } // Initialize the Raft server. - if err := s.setupRaft(stringslice.Contains(flat.Experiments, CatalogResourceExperimentName)); err != nil { + if err := s.setupRaft(); err != nil { s.Shutdown() return nil, fmt.Errorf("Failed to start Raft: %v", err) } @@ -874,55 +794,24 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, s.reportingManager = reporting.NewReportingManager(s.logger, getEnterpriseReportingDeps(flat), s, s.fsm.State()) go s.reportingManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) - // configure the server specific grpc interfaces (in-process + internal multiplexed grpc) - if err := s.setupGRPCInterfaces(config, flat); err != nil { - return nil, err - } - - // register server specific grpc services with all the interfaces they should be exposed on. - if err := s.setupGRPCServices(config, flat); err != nil { - return nil, err - } + // Initialize external gRPC server + s.setupExternalGRPC(config, logger) // Initialize internal gRPC server. // // Note: some "external" gRPC services are also exposed on the internal gRPC server // to enable RPC forwarding. + s.grpcHandler = newGRPCHandlerFromConfig(flat, config, s) s.grpcLeaderForwarder = flat.LeaderForwarder - // Start watching HCP Link resource. This needs to be created after - // the GRPC services are set up in order for the resource service client to - // function. This uses the insecure grpc channel so that it doesn't need to - // present a valid ACL token. - go hcp.RunHCPLinkWatcher( - &lib.StopChannelContext{StopCh: shutdownCh}, - logger.Named("hcp-link-watcher"), - pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), - hcp.HCPManagerLifecycleFn( - s.hcpManager, - hcpclient.NewClient, - bootstrap.LoadManagementToken, - flat.HCP.Config, - flat.HCP.DataDir, - ), - ) - - s.controllerManager = controller.NewManager( - // Usage of the insecure + unsafe grpc chan is required for the controller - // manager. It must be unauthorized so that controllers do not need to - // present valid ACL tokens for their requests and it must use the unsafe - // variant so that the controller runtimes indexing/caching layer doesn't - // keep many copies of resources around in memory for long. Care will - // be taken within the controller manager to wrap this client with another - // which clones protobuf types passing through to ensure controllers - // cannot modify the canonical resource service data that has flowed - // through the storage backend. 
- pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan), - s.loggers.Named(logging.ControllerRuntime), - ) - if err := s.registerControllers(flat, proxyUpdater); err != nil { + if err := s.setupInternalResourceService(logger); err != nil { return nil, err } + s.controllerManager = controller.NewManager( + s.internalResourceServiceClient, + logger.Named(logging.ControllerRuntime), + ) + s.registerResources(flat) go s.controllerManager.Run(&lib.StopChannelContext{StopCh: shutdownCh}) go s.trackLeaderChanges() @@ -938,31 +827,13 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, // as establishing leadership could attempt to use autopilot and cause a panic. s.initAutopilot(config) - // Construct the registrator that makes sense for the catalog version - if s.useV2Resources { - s.registrator = V2ConsulRegistrator{ - Logger: serverLogger, - NodeName: s.config.NodeName, - EntMeta: s.config.AgentEnterpriseMeta(), - Client: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), - } - } else { - s.registrator = V1ConsulRegistrator{ - Datacenter: s.config.Datacenter, - FSM: s.fsm, - Logger: serverLogger, - NodeName: s.config.NodeName, - RaftApplyFunc: s.raftApplyMsgpack, - } - } - // Start monitoring leadership. This must happen after Serf is set up // since it can fire events when leadership is obtained. go s.monitorLeadership() // Start listening for RPC requests. go func() { - if err := s.internalGRPCHandler.Run(); err != nil { + if err := s.grpcHandler.Run(); err != nil { s.logger.Error("gRPC server failed", "error", err) } }() @@ -980,6 +851,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, // Start the metrics handlers. go s.updateMetrics() + // Now we are setup, configure the HCP manager + go s.hcpManager.Run(&lib.StopChannelContext{StopCh: shutdownCh}) + err = s.runEnterpriseRateLimiterConfigEntryController() if err != nil { return nil, err @@ -988,92 +862,77 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, return s, nil } -func isV1CatalogRequest(rpcName string) bool { - switch { - case strings.HasPrefix(rpcName, "Catalog."), - strings.HasPrefix(rpcName, "Health."), - strings.HasPrefix(rpcName, "ConfigEntry."): - return true - } +func (s *Server) registerResources(deps Deps) { + if stringslice.Contains(deps.Experiments, catalogResourceExperimentName) { + catalog.RegisterTypes(s.typeRegistry) + catalog.RegisterControllers(s.controllerManager, catalog.DefaultControllerDependencies()) - switch rpcName { - case "Internal.EventFire", "Internal.KeyringOperation", "Internal.OIDCAuthMethods": - return false - default: - if strings.HasPrefix(rpcName, "Internal.") { - return true - } - return false + mesh.RegisterTypes(s.typeRegistry) } -} -func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) error { - hcpctl.RegisterControllers( - s.controllerManager, hcpctl.ControllerDependencies{ - ResourceApisEnabled: s.useV2Resources, - HCPAllowV2ResourceApis: s.hcpAllowV2Resources, - CloudConfig: deps.HCP.Config, - }, - ) + reaper.RegisterControllers(s.controllerManager) - // When not enabled, the v1 tenancy bridge is used by default. 
- if s.useV2Tenancy { - tenancy.RegisterControllers( - s.controllerManager, - tenancy.Dependencies{Registry: deps.Registry}, - ) + if s.config.DevMode { + demo.RegisterTypes(s.typeRegistry) + demo.RegisterControllers(s.controllerManager) } +} - if s.useV2Resources { - catalog.RegisterControllers(s.controllerManager) - defaultAllow, err := s.config.ACLResolverSettings.IsDefaultAllow() - if err != nil { - return err - } - - mesh.RegisterControllers(s.controllerManager, mesh.ControllerDependencies{ - TrustBundleFetcher: func() (*pbproxystate.TrustBundle, error) { - var bundle pbproxystate.TrustBundle - roots, err := s.getCARoots(nil, s.GetState()) - if err != nil { - return nil, err - } - bundle.TrustDomain = roots.TrustDomain - for _, root := range roots.Roots { - bundle.Roots = append(bundle.Roots, root.RootCert) - } - return &bundle, nil - }, - // This function is adapted from server_connect.go:getCARoots. - TrustDomainFetcher: func() (string, error) { - _, caConfig, err := s.fsm.State().CAConfig(nil) - if err != nil { - return "", err - } - - return s.getTrustDomain(caConfig) - }, - - LeafCertManager: deps.LeafCertManager, - LocalDatacenter: s.config.Datacenter, - DefaultAllow: defaultAllow, - ProxyUpdater: proxyUpdater, - }) - - auth.RegisterControllers(s.controllerManager, auth.DefaultControllerDependencies()) - multicluster.RegisterControllers(s.controllerManager) - } else { - shim := NewExportedServicesShim(s) - multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim)) +func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler { + if s.peeringBackend == nil { + panic("peeringBackend is required during construction") } - reaper.RegisterControllers(s.controllerManager) + p := peering.NewServer(peering.Config{ + Backend: s.peeringBackend, + Tracker: s.peerStreamServer.Tracker, + Logger: deps.Logger.Named("grpc-api.peering"), + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + // Only forward the request if the dc in the request matches the server's datacenter. + if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { + return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters") + } + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + Datacenter: config.Datacenter, + ConnectEnabled: config.ConnectEnabled, + PeeringEnabled: config.PeeringEnabled, + Locality: config.Locality, + FSMServer: s, + }) + s.peeringServer = p + o := operator.NewServer(operator.Config{ + Backend: s.operatorBackend, + Logger: deps.Logger.Named("grpc-api.operator"), + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + // Only forward the request if the dc in the request matches the server's datacenter. 
+ if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { + return false, fmt.Errorf("requests to transfer leader cannot be forwarded to remote datacenters") + } + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + Datacenter: config.Datacenter, + }) + s.operatorServer = o - if s.config.DevMode { - demo.RegisterControllers(s.controllerManager) + register := func(srv *grpc.Server) { + if config.RPCConfig.EnableStreaming { + pbsubscribe.RegisterStateChangeSubscriptionServer(srv, subscribe.NewServer( + &subscribeBackend{srv: s, connPool: deps.GRPCConnPool}, + deps.Logger.Named("grpc-api.subscription"))) + } + s.peeringServer.Register(srv) + s.operatorServer.Register(srv) + s.registerEnterpriseGRPCServices(deps, srv) + + // Note: these external gRPC services are also exposed on the internal server to + // enable RPC forwarding. + s.peerStreamServer.Register(srv) + s.externalACLServer.Register(srv) + s.externalConnectCAServer.Register(srv) } - return s.controllerManager.ValidateDependencies(s.registry.Types()) + return agentgrpc.NewHandler(deps.Logger, config.RPCAddr, register, nil, s.incomingRPCLimiter) } func (s *Server) connectCARootsMonitor(ctx context.Context) { @@ -1102,7 +961,7 @@ func (s *Server) connectCARootsMonitor(ctx context.Context) { } // setupRaft is used to setup and initialize Raft -func (s *Server) setupRaft(isCatalogResourceExperiment bool) error { +func (s *Server) setupRaft() error { // If we have an unclean exit then attempt to close the Raft store. defer func() { if s.raft == nil && s.raftStore != nil { @@ -1161,7 +1020,8 @@ func (s *Server) setupRaft(isCatalogResourceExperiment bool) error { return fmt.Errorf("failed trying to see if raft.db exists not sure how to continue: %w", err) } - initWAL := func() error { + // Only use WAL if there is no existing raft.db, even if it's enabled. + if s.config.LogStoreConfig.Backend == LogStoreBackendWAL && !boltFileExists { walDir := filepath.Join(path, "wal") if err := os.MkdirAll(walDir, 0755); err != nil { return err @@ -1180,29 +1040,13 @@ func (s *Server) setupRaft(isCatalogResourceExperiment bool) error { s.raftStore = wal log = wal stable = wal - return nil - } - // Only use WAL if there is no existing raft.db, even if it's enabled. - if s.config.LogStoreConfig.Backend == LogStoreBackendDefault && !boltFileExists && isCatalogResourceExperiment { - s.config.LogStoreConfig.Backend = LogStoreBackendWAL - if !s.config.LogStoreConfig.Verification.Enabled { - s.config.LogStoreConfig.Verification.Enabled = true - s.config.LogStoreConfig.Verification.Interval = 1 * time.Minute - } - if err = initWAL(); err != nil { - return err - } - } else if s.config.LogStoreConfig.Backend == LogStoreBackendWAL && !boltFileExists { - if err = initWAL(); err != nil { - return err - } } else { if s.config.LogStoreConfig.Backend == LogStoreBackendWAL { // User configured the new storage, but still has old raft.db. Warn // them! s.logger.Warn("BoltDB file raft.db found, IGNORING raft_logstore.backend which is set to 'wal'") } - s.config.LogStoreConfig.Backend = LogStoreBackendBoltDB + // Create the backend raft store for logs and stable storage. store, err := raftboltdb.New(raftboltdb.Options{ BoltOptions: &bbolt.Options{ @@ -1425,6 +1269,116 @@ func (s *Server) setupRPC() error { return nil } +// Initialize and register services on external gRPC server. 
+func (s *Server) setupExternalGRPC(config *Config, logger hclog.Logger) { + s.externalACLServer = aclgrpc.NewServer(aclgrpc.Config{ + ACLsEnabled: s.config.ACLsEnabled, + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + InPrimaryDatacenter: s.InPrimaryDatacenter(), + LoadAuthMethod: func(methodName string, entMeta *acl.EnterpriseMeta) (*structs.ACLAuthMethod, aclgrpc.Validator, error) { + return s.loadAuthMethod(methodName, entMeta) + }, + LocalTokensEnabled: s.LocalTokensEnabled, + Logger: logger.Named("grpc-api.acl"), + NewLogin: func() aclgrpc.Login { return s.aclLogin() }, + NewTokenWriter: func() aclgrpc.TokenWriter { return s.aclTokenWriter() }, + PrimaryDatacenter: s.config.PrimaryDatacenter, + ValidateEnterpriseRequest: s.validateEnterpriseRequest, + }) + s.externalACLServer.Register(s.externalGRPCServer) + + s.externalConnectCAServer = connectca.NewServer(connectca.Config{ + Publisher: s.publisher, + GetStore: func() connectca.StateStore { return s.FSM().State() }, + Logger: logger.Named("grpc-api.connect-ca"), + ACLResolver: s.ACLResolver, + CAManager: s.caManager, + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + ConnectEnabled: s.config.ConnectEnabled, + }) + s.externalConnectCAServer.Register(s.externalGRPCServer) + + dataplane.NewServer(dataplane.Config{ + GetStore: func() dataplane.StateStore { return s.FSM().State() }, + Logger: logger.Named("grpc-api.dataplane"), + ACLResolver: s.ACLResolver, + Datacenter: s.config.Datacenter, + }).Register(s.externalGRPCServer) + + serverdiscovery.NewServer(serverdiscovery.Config{ + Publisher: s.publisher, + ACLResolver: s.ACLResolver, + Logger: logger.Named("grpc-api.server-discovery"), + }).Register(s.externalGRPCServer) + + s.peeringBackend = NewPeeringBackend(s) + s.operatorBackend = NewOperatorBackend(s) + + s.peerStreamServer = peerstream.NewServer(peerstream.Config{ + Backend: s.peeringBackend, + GetStore: func() peerstream.StateStore { return s.FSM().State() }, + Logger: logger.Named("grpc-api.peerstream"), + ACLResolver: s.ACLResolver, + Datacenter: s.config.Datacenter, + ConnectEnabled: s.config.ConnectEnabled, + ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { + // Only forward the request if the dc in the request matches the server's datacenter. 
+ if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { + return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters") + } + return s.ForwardGRPC(s.grpcConnPool, info, fn) + }, + }) + s.peerStreamServer.Register(s.externalGRPCServer) + + resourcegrpc.NewServer(resourcegrpc.Config{ + Registry: s.typeRegistry, + Backend: s.raftStorageBackend, + ACLResolver: s.ACLResolver, + Logger: logger.Named("grpc-api.resource"), + }).Register(s.externalGRPCServer) +} + +func (s *Server) setupInternalResourceService(logger hclog.Logger) error { + server := grpc.NewServer() + + resourcegrpc.NewServer(resourcegrpc.Config{ + Registry: s.typeRegistry, + Backend: s.raftStorageBackend, + ACLResolver: resolver.DANGER_NO_AUTH{}, + Logger: logger.Named("grpc-api.resource"), + }).Register(server) + + pipe := agentgrpc.NewPipeListener() + go server.Serve(pipe) + + go func() { + <-s.shutdownCh + server.Stop() + }() + + conn, err := grpc.Dial("", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(pipe.DialContext), + grpc.WithBlock(), + ) + if err != nil { + server.Stop() + return err + } + go func() { + <-s.shutdownCh + conn.Close() + }() + s.internalResourceServiceClient = pbresource.NewResourceServiceClient(conn) + + return nil +} + // Shutdown is used to shutdown the server func (s *Server) Shutdown() error { s.logger.Info("shutting down server") @@ -1471,8 +1425,8 @@ func (s *Server) Shutdown() error { s.Listener.Close() } - if s.internalGRPCHandler != nil { - if err := s.internalGRPCHandler.Shutdown(); err != nil { + if s.grpcHandler != nil { + if err := s.grpcHandler.Shutdown(); err != nil { s.logger.Warn("failed to stop gRPC server", "error", err) } } @@ -2127,10 +2081,6 @@ func (s *Server) hcpServerStatus(deps Deps) hcp.StatusCallback { } } -func (s *Server) ResourceServiceClient() pbresource.ResourceServiceClient { - return pbresource.NewResourceServiceClient(s.secureSafeGRPCChan) -} - func fileExists(name string) (bool, error) { _, err := os.Stat(name) if err == nil { diff --git a/agent/consul/server_ce.go b/agent/consul/server_ce.go index ac0df9dd739c0..22660f490b7f2 100644 --- a/agent/consul/server_ce.go +++ b/agent/consul/server_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
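The server.go hunks above replace the in-process and multiplexed gRPC plumbing with setupExternalGRPC plus setupInternalResourceService; the latter serves the Resource Service on an in-memory pipe listener and dials it through grpc.WithContextDialer, so server-internal consumers such as controllers get a pbresource.ResourceServiceClient with no network I/O and no ACL enforcement (resolver.DANGER_NO_AUTH). The sketch below shows the same dial-over-a-custom-dialer pattern in isolation, assuming grpc-go's bufconn test listener and the standard health service as stand-ins for Consul's pipe listener and Resource Service.

    package main

    import (
        "context"
        "fmt"
        "log"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/health"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
        "google.golang.org/grpc/test/bufconn"
    )

    func main() {
        // In-memory listener standing in for the pipe listener used in the patch.
        lis := bufconn.Listen(1024 * 1024)

        // Server side: register a service (the health service here, where the
        // patch registers the Resource Service) and serve it over the pipe.
        srv := grpc.NewServer()
        healthpb.RegisterHealthServer(srv, health.NewServer())
        go srv.Serve(lis)
        defer srv.Stop()

        // Client side: dial with a custom dialer that returns in-memory conns,
        // mirroring grpc.WithContextDialer(pipe.DialContext) in the patch.
        conn, err := grpc.Dial("",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
                return lis.DialContext(ctx)
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        resp, err := healthpb.NewHealthClient(conn).Check(context.Background(), &healthpb.HealthCheckRequest{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.Status) // SERVING
    }

The WithBlock option used in the patch makes the dial wait for the in-memory connection before NewServer continues; it is omitted here so the first RPC drives connection setup.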
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul @@ -11,18 +12,15 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" "github.com/hashicorp/serf/serf" + "google.golang.org/grpc" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/reporting" - resourcegrpc "github.com/hashicorp/consul/agent/grpc-external/services/resource" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/logging" ) // runEnterpriseRateLimiterConfigEntryController start the rate limiter config controller @@ -30,9 +28,7 @@ func (s *Server) runEnterpriseRateLimiterConfigEntryController() error { return nil } -func (s *Server) setupEnterpriseGRPCServices(config *Config, deps Deps) error { - return nil -} +func (s *Server) registerEnterpriseGRPCServices(deps Deps, srv *grpc.Server) {} func (s *Server) enterpriseValidateJoinWAN() error { return nil // no-op @@ -196,15 +192,3 @@ func getEnterpriseReportingDeps(deps Deps) reporting.EntDeps { // no-op return reporting.EntDeps{} } - -// CE version without LicenseManager -func (s *Server) newResourceServiceConfig(typeRegistry resource.Registry, resolver resourcegrpc.ACLResolver, tenancyBridge resourcegrpc.TenancyBridge) resourcegrpc.Config { - return resourcegrpc.Config{ - Registry: typeRegistry, - Backend: s.storageBackend, - ACLResolver: resolver, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Resource), - TenancyBridge: tenancyBridge, - UseV2Tenancy: s.useV2Tenancy, - } -} diff --git a/agent/consul/server_ce_test.go b/agent/consul/server_ce_test.go index 0b75cda1a5b7d..c1760589a9e14 100644 --- a/agent/consul/server_ce_test.go +++ b/agent/consul/server_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package consul diff --git a/agent/consul/server_connect.go b/agent/consul/server_connect.go index 2274aff523bad..496d059cb4941 100644 --- a/agent/consul/server_connect.go +++ b/agent/consul/server_connect.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -19,15 +19,21 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind if err != nil { return nil, err } - - trustDomain, err := s.getTrustDomain(config) - if err != nil { - return nil, err + if config == nil || config.ClusterID == "" { + return nil, fmt.Errorf("CA has not finished initializing") } indexedRoots := &structs.IndexedCARoots{} - indexedRoots.TrustDomain = trustDomain + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) + if signingID == nil { + // If CA is bootstrapped at all then this should never happen but be + // defensive. 
+ return nil, fmt.Errorf("no cluster trust domain setup") + } + + indexedRoots.TrustDomain = signingID.Host() indexedRoots.Index, indexedRoots.Roots = index, roots if indexedRoots.Roots == nil { @@ -71,19 +77,3 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind return indexedRoots, nil } - -func (s *Server) getTrustDomain(config *structs.CAConfiguration) (string, error) { - if config == nil || config.ClusterID == "" { - return "", fmt.Errorf("CA has not finished initializing") - } - - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) - if signingID == nil { - // If CA is bootstrapped at all then this should never happen but be - // defensive. - return "", fmt.Errorf("no cluster trust domain setup") - } - - return signingID.Host(), nil -} diff --git a/agent/consul/server_grpc.go b/agent/consul/server_grpc.go deleted file mode 100644 index a4ff8660951b2..0000000000000 --- a/agent/consul/server_grpc.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "fmt" - - "github.com/armon/go-metrics" - "github.com/fullstorydev/grpchan/inprocgrpc" - middleware "github.com/grpc-ecosystem/go-grpc-middleware" - recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - "google.golang.org/grpc" - "google.golang.org/grpc/reflection" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - aclgrpc "github.com/hashicorp/consul/agent/grpc-external/services/acl" - "github.com/hashicorp/consul/agent/grpc-external/services/configentry" - "github.com/hashicorp/consul/agent/grpc-external/services/connectca" - "github.com/hashicorp/consul/agent/grpc-external/services/dataplane" - "github.com/hashicorp/consul/agent/grpc-external/services/peerstream" - resourcegrpc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - "github.com/hashicorp/consul/agent/grpc-external/services/serverdiscovery" - agentgrpc "github.com/hashicorp/consul/agent/grpc-internal" - "github.com/hashicorp/consul/agent/grpc-internal/services/subscribe" - agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" - "github.com/hashicorp/consul/agent/rpc/operator" - "github.com/hashicorp/consul/agent/rpc/peering" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/tenancy" - "github.com/hashicorp/consul/lib/stringslice" - "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/pbsubscribe" -) - -func (s *Server) setupGRPCInterfaces(config *Config, deps Deps) error { - // A server has 5 different gRPC interfaces - // - // * External - This is the main public gRPC network listener. This - // is an actual *grpc.Server that we have listening on both the - // grpc and grpc_tls ports. Generally this interface will not be - // used by the server itself. All services which are intended - // to be public APIs must be registered to this interface. This - // interface is created outside of the server in the agent code - // and then passed to the NewServer constructor. Some services - // like xDS and DNS get registered outside of the server code. - // - // * Internal / Multiplexed - Our internal_rpc port uses yamux and - // various byte prefixes to multiplex different protocols over - // the single connection. 
One of the multiplexed protocols is - // gRPC. gRPC in this fashion works using a custom net.Listener - // implementation that receives net.Conns to be handled through - // a channel. When a new yamux session is opened which produces - // a yamux conn (which implements the net.Conn interface), the - // connection is then sent to the custom listener. Then the - // standard grpc.Server.Serve method can accept the conn from - // the listener and operate on it like any other standard conn. - // Historically, the external gRPC interface was optional and - // so all services which needed leader or DC forwarding had to - // be exposed on this interface in order to guarantee they - // would be available. In the future, an external gRPC interface - // likely will be required and the services which need registering - // to the multiplexed listener will be greatly reduced. In the - // very long term we want to get rid of this internal multiplexed - // port/listener and instead have all component communications use - // gRPC natively. For now though, if your service will need to - // RECEIVE forwarded requests then it must be registered to this - // interface. - // - // * In-Process - For routines running on the server we don't want them - // to require network i/o as that will incur a lot of unnecessary - // overhead. To avoid that we are utilizing the `grpchan` library - // (github.com/fullstorydev/grpchan) and its `inprocgrpc` package. - // The library provides the `inprocgrpc.Channel` which implements - // both the `grpc.ServiceRegistrar` and `grpc.ClientConnInterface` - // interfaces. Services get registered to the `Channel` and then - // gRPC service clients can be created with the `Channel` used - // for the backing `ClientConn`. When a client then uses the - // `Invoke` or `NewStream` methods on the `Channel`, the `Channel` - // will lookup in its registry of services to find the service's - // server implementation and then have the standard - // grpc.MethodDesc.Handler function handle the request. We use - // a few variants of the in-process gRPC Channel. For now all - // these channels are created and managed in server code but we - // may need to move these into the higher level agent setup. - // - // * Insecure + Unsafe - The insecure + unsafe gRPC Channel has - // services registered to it that wont do typical ACL - // resolution. Instead when the service resolves ACL tokens - // a resolver is used which always grants unrestricted - // privileges. Additionally, this "unsafe" variant DOES - // NOT clone resources as they pass through the channel. Care - // Must be taken to note mutate the data passed through the - // Channel or else we could easily cause data race related - // or consistency bugs. - // - // * Insecure + Safe - Similar to the Insecure + Unsafe variant, - // ACL resolution always provides an authorizer with unrestricted - // privileges. However, this interface is concurrency/memory safe - // in that protobuf messages passing through the interface are - // cloned so that the client is free to mutate those messages - // once the request is complete. All services registered to the - // Unsafe variant should also be registered to this interface. - // - // * Secure + Safe - This Channel will do typical ACL resolution from - // tokens and will clone protobuf messages that pass through. 
This - // interface will be useful for something like the HTTP server that - // is crafting the gRPC requests from a user request and wants to - // assume no implicit privileges by the nature of running on the - // server. All services registered to the insecure variants should - // also be registered to this interface. Additionally other services - // that correspond to user requests should also be registered to this - // interface. - // - // Currently there is not a need for a Secure + Unsafe variant. We could - // add it if needed in the future. - - recoveryOpts := agentmiddleware.PanicHandlerMiddlewareOpts(s.loggers.Named(logging.GRPCAPI)) - - inprocLabels := []metrics.Label{{ - Name: "server_type", - Value: "in-process", - }} - - statsHandler := agentmiddleware.NewStatsHandler(metrics.Default(), inprocLabels) - - // TODO(inproc-grpc) - figure out what to do with rate limiting inproc grpc. If we - // want to rate limit in-process clients then we are going to need a unary interceptor - // to do that. Another idea would be to create rate limited clients which can be given - // to controllers or other internal code so that the whole Channel isn't limited but - // rather individual consumers of that channel. - - // Build the Insecure + Unsafe gRPC Channel - s.insecureUnsafeGRPCChan = new(inprocgrpc.Channel). - // Bypass the in-process gRPCs cloning functionality by providing - // a Cloner implementation which doesn't actually clone the data. - // Note that this is only done for the Unsafe gRPC Channel and - // all the Safe variants will utilize the default cloning - // functionality. - WithCloner(inprocgrpc.CloneFunc(func(in any) (any, error) { - return in, nil - })). - WithServerUnaryInterceptor(middleware.ChainUnaryServer( - recovery.UnaryServerInterceptor(recoveryOpts...), - statsHandler.Intercept, - )). - WithServerStreamInterceptor(middleware.ChainStreamServer( - recovery.StreamServerInterceptor(recoveryOpts...), - agentmiddleware.NewActiveStreamCounter(metrics.Default(), inprocLabels).Intercept, - )) - - // Build the Insecure + Safe gRPC Channel - s.insecureSafeGRPCChan = new(inprocgrpc.Channel). - WithServerUnaryInterceptor(middleware.ChainUnaryServer( - recovery.UnaryServerInterceptor(recoveryOpts...), - statsHandler.Intercept, - )). - WithServerStreamInterceptor(middleware.ChainStreamServer( - recovery.StreamServerInterceptor(recoveryOpts...), - agentmiddleware.NewActiveStreamCounter(metrics.Default(), inprocLabels).Intercept, - )) - - // Build the Secure + Safe gRPC Channel - s.secureSafeGRPCChan = new(inprocgrpc.Channel). - WithServerUnaryInterceptor(middleware.ChainUnaryServer( - recovery.UnaryServerInterceptor(recoveryOpts...), - statsHandler.Intercept, - )). - WithServerStreamInterceptor(middleware.ChainStreamServer( - recovery.StreamServerInterceptor(recoveryOpts...), - agentmiddleware.NewActiveStreamCounter(metrics.Default(), inprocLabels).Intercept, - )) - - // create the internal multiplexed gRPC interface - s.internalGRPCHandler = agentgrpc.NewHandler(deps.Logger, config.RPCAddr, nil, s.incomingRPCLimiter) - - return nil -} - -func (s *Server) setupGRPCServices(config *Config, deps Deps) error { - // Register the resource service with the in-process registrars WITHOUT AUTHORIZATION - err := s.registerResourceServiceServer( - deps.Registry, - resolver.DANGER_NO_AUTH{}, - s.insecureUnsafeGRPCChan, - s.insecureSafeGRPCChan) - if err != nil { - return err - } - - // Register the resource service with all other registrars other - // than the internal/multiplexed interface. 
Currently there is - // no need to forward resource service RPCs and therefore the - // service doesn't need to be available on that interface. - err = s.registerResourceServiceServer( - deps.Registry, - s.ACLResolver, - s.secureSafeGRPCChan, - s.internalGRPCHandler, - s.externalGRPCServer, - ) - if err != nil { - return err - } - - // The ACL grpc services get registered with all "secure" gRPC interfaces - err = s.registerACLServer( - s.secureSafeGRPCChan, - s.externalGRPCServer, - s.internalGRPCHandler, - ) - if err != nil { - return err - } - - // register the Connect CA service on all "secure" interfaces - err = s.registerConnectCAServer( - s.secureSafeGRPCChan, - s.externalGRPCServer, - s.internalGRPCHandler, - ) - if err != nil { - return err - } - - // Initializing the peering backend must be done before - // creating any peering servers. There is other code which - // calls methods on this and so the backend must be stored - // on the Server type. In the future we should investigate - // whether we can not require the backend in that other code. - s.peeringBackend = NewPeeringBackend(s) - - // register the peering service on the external gRPC server only - // As this service is only ever accessed externally there is - // no need to register it on the various in-process Channels - s.peerStreamServer, err = s.registerPeerStreamServer( - config, - s.externalGRPCServer, - s.internalGRPCHandler, - ) - if err != nil { - return err - } - - // register the peering service on the internal interface only. As - // the peering gRPC service is a private API its only ever accessed - // via the internalGRPCHandler with an actual network conn managed - // by the Agents GRPCConnPool. - err = s.registerPeeringServer( - config, - s.internalGRPCHandler, - ) - if err != nil { - return err - } - - // Register the Operator service on all "secure" interfaces. The - // operator service is currently only accessed via the - // internalGRPCHandler but in the future these APIs are likely to - // become part of our "public" API and so it should be exposed on - // more interfaces. - err = s.registerOperatorServer( - config, - deps, - s.internalGRPCHandler, - s.secureSafeGRPCChan, - s.externalGRPCServer, - ) - if err != nil { - return err - } - - // register the stream subscription service on the multiplexed internal interface - // if stream is enabled. - if config.RPCConfig.EnableStreaming { - err = s.registerStreamSubscriptionServer( - deps, - s.internalGRPCHandler, - ) - if err != nil { - return err - } - } - - // register the server discovery service on all "secure" interfaces other - // than the multiplexed internal interface. This service is mainly consumed - // by the consul-server-connection-manager library which is used by various - // other system components other than the agent. - err = s.registerServerDiscoveryServer( - s.ACLResolver, - s.secureSafeGRPCChan, - s.externalGRPCServer, - ) - if err != nil { - return err - } - - // register the server discovery service on the insecure in-process channels. - // Currently, this is unused but eventually things such as the peering service - // should be refactored to consume the in-memory service instead of hooking - // directly into an the event publisher and subscribing to specific events. - err = s.registerServerDiscoveryServer( - resolver.DANGER_NO_AUTH{}, - s.insecureUnsafeGRPCChan, - s.insecureSafeGRPCChan, - ) - if err != nil { - return err - } - - // register the data plane service on the external gRPC server only. 
This - // service is only access by dataplanes and at this time there is no need - // for anything internal in Consul to use the service. If that changes - // we could register it on the in-process interfaces as well. - err = s.registerDataplaneServer( - deps, - s.externalGRPCServer, - ) - if err != nil { - return err - } - - // register the configEntry service on the internal interface only. As - // it is only accessed via the internalGRPCHandler with an actual network - // conn managed by the Agents GRPCConnPool. - err = s.registerConfigEntryServer( - s.internalGRPCHandler, - ) - if err != nil { - return err - } - - // enable grpc server reflection for the external gRPC interface only - reflection.Register(s.externalGRPCServer) - - return s.setupEnterpriseGRPCServices(config, deps) -} - -func (s *Server) registerResourceServiceServer(typeRegistry resource.Registry, resolver resourcegrpc.ACLResolver, registrars ...grpc.ServiceRegistrar) error { - if s.storageBackend == nil { - return fmt.Errorf("storage backend cannot be nil") - } - - var tenancyBridge resourcegrpc.TenancyBridge - if s.useV2Tenancy { - tenancyBridge = tenancy.NewV2TenancyBridge().WithClient( - // This assumes that the resource service will be registered with - // the insecureUnsafeGRPCChan. We are using the insecure and unsafe - // channel here because the V2 Tenancy bridge only reads data - // from the client and does not modify it. Therefore sharing memory - // with the resource services canonical immutable data is advantageous - // to prevent wasting CPU time for every resource op to clone things. - pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan), - ) - } else { - tenancyBridge = NewV1TenancyBridge(s) - } - - // Create the Resource Service Server - srv := resourcegrpc.NewServer(s.newResourceServiceConfig(typeRegistry, resolver, tenancyBridge)) - - // Register the server to all the desired interfaces - for _, reg := range registrars { - pbresource.RegisterResourceServiceServer(reg, srv) - } - return nil -} - -func (s *Server) registerACLServer(registrars ...grpc.ServiceRegistrar) error { - srv := aclgrpc.NewServer(aclgrpc.Config{ - ACLsEnabled: s.config.ACLsEnabled, - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - InPrimaryDatacenter: s.InPrimaryDatacenter(), - LoadAuthMethod: func(methodName string, entMeta *acl.EnterpriseMeta) (*structs.ACLAuthMethod, aclgrpc.Validator, error) { - return s.loadAuthMethod(methodName, entMeta) - }, - LocalTokensEnabled: s.LocalTokensEnabled, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.ACL), - NewLogin: func() aclgrpc.Login { return s.aclLogin() }, - NewTokenWriter: func() aclgrpc.TokenWriter { return s.aclTokenWriter() }, - PrimaryDatacenter: s.config.PrimaryDatacenter, - ValidateEnterpriseRequest: s.validateEnterpriseRequest, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerPeerStreamServer(config *Config, registrars ...grpc.ServiceRegistrar) (*peerstream.Server, error) { - if s.peeringBackend == nil { - panic("peeringBackend is required during construction") - } - - srv := peerstream.NewServer(peerstream.Config{ - Backend: s.peeringBackend, - GetStore: func() peerstream.StateStore { return s.FSM().State() }, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.PeerStream), - ACLResolver: s.ACLResolver, - Datacenter: s.config.Datacenter, - ConnectEnabled: s.config.ConnectEnabled, - ForwardRPC: 
func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - // Only forward the request if the dc in the request matches the server's datacenter. - if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { - return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters") - } - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return srv, nil -} - -func (s *Server) registerPeeringServer(config *Config, registrars ...grpc.ServiceRegistrar) error { - if s.peeringBackend == nil { - panic("peeringBackend is required during construction") - } - - if s.peerStreamServer == nil { - panic("the peer stream server must be configured before the peering server") - } - - srv := peering.NewServer(peering.Config{ - Backend: s.peeringBackend, - Tracker: s.peerStreamServer.Tracker, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Peering), - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - // Only forward the request if the dc in the request matches the server's datacenter. - if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { - return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters") - } - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - Datacenter: config.Datacenter, - ConnectEnabled: config.ConnectEnabled, - PeeringEnabled: config.PeeringEnabled, - Locality: config.Locality, - FSMServer: s, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerOperatorServer(config *Config, deps Deps, registrars ...grpc.ServiceRegistrar) error { - srv := operator.NewServer(operator.Config{ - Backend: NewOperatorBackend(s), - Logger: deps.Logger.Named("grpc-api.operator"), - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - // Only forward the request if the dc in the request matches the server's datacenter. 
- if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter { - return false, fmt.Errorf("requests to transfer leader cannot be forwarded to remote datacenters") - } - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - Datacenter: config.Datacenter, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerStreamSubscriptionServer(deps Deps, registrars ...grpc.ServiceRegistrar) error { - srv := subscribe.NewServer( - &subscribeBackend{srv: s, connPool: deps.GRPCConnPool}, - s.loggers.Named(logging.GRPCAPI).Named("subscription"), - ) - - for _, reg := range registrars { - pbsubscribe.RegisterStateChangeSubscriptionServer(reg, srv) - } - - return nil -} - -func (s *Server) registerConnectCAServer(registrars ...grpc.ServiceRegistrar) error { - srv := connectca.NewServer(connectca.Config{ - Publisher: s.publisher, - GetStore: func() connectca.StateStore { return s.FSM().State() }, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.ConnectCA), - ACLResolver: s.ACLResolver, - CAManager: s.caManager, - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - ConnectEnabled: s.config.ConnectEnabled, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerDataplaneServer(deps Deps, registrars ...grpc.ServiceRegistrar) error { - srv := dataplane.NewServer(dataplane.Config{ - GetStore: func() dataplane.StateStore { return s.FSM().State() }, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Dataplane), - ACLResolver: s.ACLResolver, - Datacenter: s.config.Datacenter, - EnableV2: stringslice.Contains(deps.Experiments, CatalogResourceExperimentName), - ResourceAPIClient: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerServerDiscoveryServer(resolver serverdiscovery.ACLResolver, registrars ...grpc.ServiceRegistrar) error { - srv := serverdiscovery.NewServer(serverdiscovery.Config{ - Publisher: s.publisher, - ACLResolver: resolver, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.ServerDiscovery), - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} - -func (s *Server) registerConfigEntryServer(registrars ...grpc.ServiceRegistrar) error { - - srv := configentry.NewServer(configentry.Config{ - Backend: NewConfigEntryBackend(s), - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.ConfigEntry), - ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { - return s.ForwardGRPC(s.grpcConnPool, info, fn) - }, - FSMServer: s, - }) - - for _, reg := range registrars { - srv.Register(reg) - } - - return nil -} diff --git a/agent/consul/server_log_verification.go b/agent/consul/server_log_verification.go index 2bde7dbc81b40..5646e78760989 100644 --- a/agent/consul/server_log_verification.go +++ b/agent/consul/server_log_verification.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_lookup.go b/agent/consul/server_lookup.go index 60b9c076bf148..e1952d671d07d 100644 --- a/agent/consul/server_lookup.go +++ b/agent/consul/server_lookup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_lookup_test.go b/agent/consul/server_lookup_test.go index 52e3605de719b..5d3d3d4e0e42f 100644 --- a/agent/consul/server_lookup_test.go +++ b/agent/consul/server_lookup_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_metadata.go b/agent/consul/server_metadata.go index b9a4eaf2eb390..231571fcadfde 100644 --- a/agent/consul/server_metadata.go +++ b/agent/consul/server_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_metadata_test.go b/agent/consul/server_metadata_test.go index a263902a2ce2d..c47be269d8519 100644 --- a/agent/consul/server_metadata_test.go +++ b/agent/consul/server_metadata_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_overview.go b/agent/consul/server_overview.go index a94749d53498a..62bdb34406121 100644 --- a/agent/consul/server_overview.go +++ b/agent/consul/server_overview.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_overview_test.go b/agent/consul/server_overview_test.go index 7780b5ce83967..ebb930a1e2bf2 100644 --- a/agent/consul/server_overview_test.go +++ b/agent/consul/server_overview_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_register.go b/agent/consul/server_register.go index 90d95f061956f..61f1daefc7dfa 100644 --- a/agent/consul/server_register.go +++ b/agent/consul/server_register.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_serf.go b/agent/consul/server_serf.go index aea50aa6dd199..1dc6c25b1cce5 100644 --- a/agent/consul/server_serf.go +++ b/agent/consul/server_serf.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 19cbd568496e3..7054f8c6ec2a2 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -37,13 +37,10 @@ import ( external "github.com/hashicorp/consul/agent/grpc-external" grpcmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" hcpclient "github.com/hashicorp/consul/agent/hcp/client" - hcpconfig "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" - proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" @@ -120,7 +117,7 @@ func waitForLeaderEstablishment(t *testing.T, servers ...*Server) { }) } -func testServerConfig(t testutil.TestingTB) (string, *Config) { +func testServerConfig(t *testing.T) (string, *Config) { dir := testutil.TempDir(t, "consul") config := DefaultConfig() @@ -229,12 +226,6 @@ func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) { } func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *Server) { - return testServerWithDepsAndConfig(t, nil, configOpts...) -} - -// testServerWithDepsAndConfig is similar to testServerWithConfig except that it also allows modifying dependencies. -// This is useful for things like injecting experiment flags. -func testServerWithDepsAndConfig(t *testing.T, depOpts func(*Deps), configOpts ...func(*Config)) (string, *Server) { var dir string var srv *Server @@ -242,7 +233,7 @@ func testServerWithDepsAndConfig(t *testing.T, depOpts func(*Deps), configOpts . var deps Deps // Retry added to avoid cases where bind addr is already in use retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) { - dir, config = testServerConfig(r) + dir, config = testServerConfig(t) for _, fn := range configOpts { fn(config) } @@ -255,13 +246,8 @@ func testServerWithDepsAndConfig(t *testing.T, depOpts func(*Deps), configOpts . 
config.ACLResolverSettings.EnterpriseMeta = *config.AgentEnterpriseMeta() var err error - deps = newDefaultDeps(r, config) - - if depOpts != nil { - depOpts(&deps) - } - - srv, err = newServerWithDeps(r, config, deps) + deps = newDefaultDeps(t, config) + srv, err = newServerWithDeps(t, config, deps) if err != nil { r.Fatalf("err: %v", err) } @@ -341,7 +327,7 @@ func newServer(t *testing.T, c *Config) (*Server, error) { return newServerWithDeps(t, c, newDefaultDeps(t, c)) } -func newServerWithDeps(t testutil.TestingTB, c *Config, deps Deps) (*Server, error) { +func newServerWithDeps(t *testing.T, c *Config, deps Deps) (*Server, error) { // chain server up notification oldNotify := c.NotifyListen up := make(chan struct{}) @@ -351,9 +337,8 @@ func newServerWithDeps(t testutil.TestingTB, c *Config, deps Deps) (*Server, err oldNotify() } } - grpcServer := external.NewServer(deps.Logger.Named("grpc.external"), nil, deps.TLSConfigurator, rpcRate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil) - proxyUpdater := proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{}) - srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger, proxyUpdater) + grpcServer := external.NewServer(deps.Logger.Named("grpc.external"), nil, deps.TLSConfigurator, rpcRate.NullRequestLimitsHandler(), keepalive.ServerParameters{}) + srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger) if err != nil { return nil, err } @@ -1260,7 +1245,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { } } - s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) + s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) if err != nil { t.Fatalf("err: %v", err) } @@ -1298,7 +1283,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { return nil } - s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) + s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) if err != nil { t.Fatalf("err: %v", err) } @@ -1332,7 +1317,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) { deps := newDefaultDeps(t, conf) deps.NewRequestRecorderFunc = nil - s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) + s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) require.Error(t, err, "need err when provider func is nil") require.Equal(t, err.Error(), "cannot initialize server without an RPC request recorder provider") @@ -1351,7 +1336,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) { return nil } - s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) + s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) require.Error(t, err, "need err when RequestRecorder is nil") require.Equal(t, err.Error(), "cannot initialize server with a nil RPC request recorder") @@ -2096,8 +2081,6 @@ func TestServer_Peering_LeadershipCheck(t *testing.T) { func TestServer_hcpManager(t *testing.T) { _, conf1 := testServerConfig(t) - - // Configure the server for the StatusFn conf1.BootstrapExpect = 1 conf1.RPCAdvertise = &net.TCPAddr{IP: []byte{127, 0, 0, 2}, Port: conf1.RPCAddr.Port} hcp1 := hcpclient.NewMockClient(t) @@ -2107,10 +2090,8 @@ func TestServer_hcpManager(t *testing.T) { require.Equal(t, status.LanAddress, "127.0.0.2") }).Call.Return(nil) - // Configure the server for the ManagementTokenUpserterFn - conf1.ACLsEnabled = true - deps1 := newDefaultDeps(t, conf1) + deps1.HCP.Client = hcp1 s1, err := newServerWithDeps(t, conf1, deps1) if err != nil { t.Fatalf("err: %v", err) @@ -2118,36 
+2099,8 @@ func TestServer_hcpManager(t *testing.T) { defer s1.Shutdown() require.NotNil(t, s1.hcpManager) waitForLeaderEstablishment(t, s1) - - // Update the HCP manager and start it - token, err := uuid.GenerateUUID() - require.NoError(t, err) - s1.hcpManager.UpdateConfig(hcp1, hcpconfig.CloudConfig{ - ManagementToken: token, - }) - err = s1.hcpManager.Start(context.Background()) - require.NoError(t, err) - - // Validate that the server status pushed as expected hcp1.AssertExpectations(t) - // Validate that the HCP token has been created as expected - retry.Run(t, func(r *retry.R) { - _, createdToken, err := s1.fsm.State().ACLTokenGetBySecret(nil, token, nil) - require.NoError(r, err) - require.NotNil(r, createdToken) - }) - - // Stop the HCP manager - err = s1.hcpManager.Stop() - require.NoError(t, err) - - // Validate that the HCP token has been deleted as expected - retry.Run(t, func(r *retry.R) { - _, createdToken, err := s1.fsm.State().ACLTokenGetBySecret(nil, token, nil) - require.NoError(r, err) - require.Nil(r, createdToken) - }) } func TestServer_addServerTLSInfo(t *testing.T) { @@ -2293,35 +2246,3 @@ func TestServer_addServerTLSInfo(t *testing.T) { }) } } - -func TestServer_ControllerDependencies(t *testing.T) { - // The original goal of this test was to track controller/resource type dependencies - // as they change over time. However, the test is difficult to maintain and provides - // only limited value as we were not even performing validations on them. The Server - // type itself will validate that no cyclical dependencies exist so this test really - // only produces a visual representation of the dependencies. That comes at the expense - // of having to maintain the golden files. What further complicates this is that - // Consul Enterprise will have potentially different dependencies that don't exist - // in CE. Therefore if we want to maintain this test, we would need to have a separate - // Enterprise and CE golden files and any CE PR which causes regeneration of the golden - // file would require another commit in enterprise to regen the enterprise golden file - // even if no new enterprise watches were added. - // - // Therefore until we have a better way of managing this, the test will be skipped. - t.Skip("This test would be very difficult to maintain and provides limited value") - - _, conf := testServerConfig(t) - deps := newDefaultDeps(t, conf) - deps.Experiments = []string{"resource-apis", "v2tenancy"} - deps.LeafCertManager = &leafcert.Manager{} - - s1, err := newServerWithDeps(t, conf, deps) - require.NoError(t, err) - - waitForLeaderEstablishment(t, s1) - // gotest.tools/v3 defines CLI flags which are incompatible wit the golden package - // Once we eliminate gotest.tools/v3 from usage within Consul we could uncomment this - // actual := fmt.Sprintf("```mermaid\n%s\n```", s1.controllerManager.CalculateDependencies(s1.registry.Types()).ToMermaid()) - // expected := golden.Get(t, actual, "v2-resource-dependencies") - // require.Equal(t, expected, actual) -} diff --git a/agent/consul/servercert/manager.go b/agent/consul/servercert/manager.go index 664753439477d..75c2a4f276082 100644 --- a/agent/consul/servercert/manager.go +++ b/agent/consul/servercert/manager.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package servercert diff --git a/agent/consul/servercert/manager_test.go b/agent/consul/servercert/manager_test.go index e9cc0c81c58c1..dfadfe4b953fb 100644 --- a/agent/consul/servercert/manager_test.go +++ b/agent/consul/servercert/manager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package servercert diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go index f2f8ab7740234..6e41138f983c6 100644 --- a/agent/consul/session_endpoint.go +++ b/agent/consul/session_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/session_endpoint_test.go b/agent/consul/session_endpoint_test.go index 408cd7b058c59..ae04d2658f9a7 100644 --- a/agent/consul/session_endpoint_test.go +++ b/agent/consul/session_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/session_timers.go b/agent/consul/session_timers.go index f1c62b08a8196..b4c1b425cb267 100644 --- a/agent/consul/session_timers.go +++ b/agent/consul/session_timers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/session_timers_test.go b/agent/consul/session_timers_test.go index f944cc76745d8..d44ed2b366fed 100644 --- a/agent/consul/session_timers_test.go +++ b/agent/consul/session_timers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go index 8f5440e14dffa..7866ec8fed198 100644 --- a/agent/consul/session_ttl.go +++ b/agent/consul/session_ttl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/session_ttl_test.go b/agent/consul/session_ttl_test.go index e552f0ff6cdff..5cd720f3f8933 100644 --- a/agent/consul/session_ttl_test.go +++ b/agent/consul/session_ttl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/snapshot_endpoint.go b/agent/consul/snapshot_endpoint.go index c9a6e9ace47c8..7e5f21113aebd 100644 --- a/agent/consul/snapshot_endpoint.go +++ b/agent/consul/snapshot_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // The snapshot endpoint is a special non-RPC endpoint that supports streaming // for taking and restoring snapshots for disaster recovery. This gets wired diff --git a/agent/consul/snapshot_endpoint_test.go b/agent/consul/snapshot_endpoint_test.go index 40bede9149743..f401bb72e38d0 100644 --- a/agent/consul/snapshot_endpoint_test.go +++ b/agent/consul/snapshot_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/state/acl.go b/agent/consul/state/acl.go index 34c26c621ab95..f57c3387352de 100644 --- a/agent/consul/state/acl.go +++ b/agent/consul/state/acl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -526,8 +526,7 @@ func aclTokenSetTxn(tx WriteTxn, idx uint64, token *structs.ACLToken, opts ACLTo } if opts.ProhibitUnprivileged { - if numValidRoles == 0 && numValidPolicies == 0 && len(token.ServiceIdentities) == 0 && - len(token.NodeIdentities) == 0 && len(token.TemplatedPolicies) == 0 { + if numValidRoles == 0 && numValidPolicies == 0 && len(token.ServiceIdentities) == 0 && len(token.NodeIdentities) == 0 { return ErrTokenHasNoPrivileges } } @@ -1178,26 +1177,6 @@ func aclRoleSetTxn(tx WriteTxn, idx uint64, role *structs.ACLRole, allowMissing } } - for _, templatedPolicy := range role.TemplatedPolicies { - if templatedPolicy.TemplateName == "" { - return fmt.Errorf("encountered a Role %s (%s) with an empty templated policy name in the state store", role.Name, role.ID) - } - - baseTemplate, ok := structs.GetACLTemplatedPolicyBase(templatedPolicy.TemplateName) - if !ok { - return fmt.Errorf("encountered a Role %s (%s) with an invalid templated policy name %q", role.Name, role.ID, templatedPolicy.TemplateName) - } - - if templatedPolicy.TemplateID == "" { - templatedPolicy.TemplateID = baseTemplate.TemplateID - } - - err := templatedPolicy.ValidateTemplatedPolicy(baseTemplate.Schema) - if err != nil { - return fmt.Errorf("encountered a Role %s (%s) with an invalid templated policy: %w", role.Name, role.ID, err) - } - } - if err := aclRoleUpsertValidateEnterprise(tx, role, existing); err != nil { return err } diff --git a/agent/consul/state/acl_ce.go b/agent/consul/state/acl_ce.go index 9e1dd7ebdbb11..33b3b5986fcaa 100644 --- a/agent/consul/state/acl_ce.go +++ b/agent/consul/state/acl_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/acl_ce_test.go b/agent/consul/state/acl_ce_test.go index 77b5bf3adb5a9..4d3bcdcfc00b7 100644 --- a/agent/consul/state/acl_ce_test.go +++ b/agent/consul/state/acl_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/acl_events.go b/agent/consul/state/acl_events.go index d00062c23bac9..3767d2d2d1542 100644 --- a/agent/consul/state/acl_events.go +++ b/agent/consul/state/acl_events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/acl_events_test.go b/agent/consul/state/acl_events_test.go index 3c6e3fdfab174..303d54a25be1b 100644 --- a/agent/consul/state/acl_events_test.go +++ b/agent/consul/state/acl_events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/acl_schema.go b/agent/consul/state/acl_schema.go index d96e097122251..75ca0f3a26893 100644 --- a/agent/consul/state/acl_schema.go +++ b/agent/consul/state/acl_schema.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" ) const ( @@ -410,7 +409,7 @@ func indexExpiresFromACLToken(t *structs.ACLToken, local bool) ([]byte, error) { } func indexServiceNameFromACLToken(token *structs.ACLToken) ([][]byte, error) { - vals := make([][]byte, 0, len(token.ServiceIdentities)+len(token.TemplatedPolicies)) + vals := make([][]byte, 0, len(token.ServiceIdentities)) for _, id := range token.ServiceIdentities { if id != nil && id.ServiceName != "" { var b indexBuilder @@ -418,15 +417,6 @@ func indexServiceNameFromACLToken(token *structs.ACLToken) ([][]byte, error) { vals = append(vals, b.Bytes()) } } - - for _, tp := range token.TemplatedPolicies { - if tp != nil && tp.TemplateName == api.ACLTemplatedPolicyServiceName && tp.TemplateVariables != nil && tp.TemplateVariables.Name != "" { - var b indexBuilder - b.String(strings.ToLower(tp.TemplateVariables.Name)) - vals = append(vals, b.Bytes()) - } - } - if len(vals) == 0 { return nil, errMissingValueForIndex } diff --git a/agent/consul/state/acl_test.go b/agent/consul/state/acl_test.go index 358d2d07fdb80..eec05b5fa9d2b 100644 --- a/agent/consul/state/acl_test.go +++ b/agent/consul/state/acl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -868,33 +868,6 @@ func TestStateStore_ACLToken_List(t *testing.T) { }, Local: true, }, - // templated policy: the serviceName specific token - &structs.ACLToken{ - AccessorID: "2f89e357-dedb-8d8f-7f30-1f465a41508a", - SecretID: "21ab62c9-5372-038c-b6ba-424961cb38c7", - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: "builtin/service", - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "service-1", - }, - }, - }, - }, - // templated policy: the serviceName specific token and local - &structs.ACLToken{ - AccessorID: "5e5d6269-f933-3af2-fe30-259b050223f9", - SecretID: "89a456eb-5d55-9a65-92e1-96935dc5b358", - TemplatedPolicies: []*structs.ACLTemplatedPolicy{ - { - TemplateName: "builtin/service", - TemplateVariables: &structs.ACLTemplatedPolicyVariables{ - Name: "service-1", - }, - }, - }, - Local: true, - }, } require.NoError(t, s.ACLTokenBatchSet(2, tokens, ACLTokenSetOptions{})) @@ -920,7 +893,6 @@ func TestStateStore_ACLToken_List(t *testing.T) { methodName: "", accessors: []string{ acl.AnonymousTokenID, - "2f89e357-dedb-8d8f-7f30-1f465a41508a", // templated policy: serviceName + global "47eea4da-bda1-48a6-901c-3e36d2d9262f", // policy + global "54866514-3cf2-4fec-8a8a-710583831834", // mgmt + global "74277ae1-6a9b-4035-b444-2370fe6a2cb5", // authMethod + global @@ -938,7 +910,6 @@ func TestStateStore_ACLToken_List(t *testing.T) { accessors: []string{ "211f0360-ef53-41d3-9d4d-db84396eb6c0", // authMethod + local "4915fc9d-3726-4171-b588-6c271f45eecd", // policy + local - "5e5d6269-f933-3af2-fe30-259b050223f9", // templated policies: serviceName + local "a14fa45e-0afe-4b44-961d-a430030ccfe2", // serviceName + local "cadb4f13-f62a-49ab-ab3f-5a7e01b925d9", // role + local "f1093997-b6c7-496d-bfb8-6b1b1895641b", // mgmt + local @@ -1059,58 +1030,18 @@ func TestStateStore_ACLToken_List(t *testing.T) { }, }, { - name: "templated policy: ServiceName - Global", - local: false, - global: true, - policy: "", - role: "", - methodName: "", - serviceName: "service-1", - accessors: []string{ 
- "2f89e357-dedb-8d8f-7f30-1f465a41508a", // serviceName + global - }, - }, - { - name: "templated policy: ServiceName - Local", - local: true, - global: false, - policy: "", - role: "", - methodName: "", - serviceName: "service-1", - accessors: []string{ - "5e5d6269-f933-3af2-fe30-259b050223f9", // serviceName + local - }, - }, - { - name: "templated policy: ServiceName - All", - local: true, - global: true, - policy: "", - role: "", - methodName: "", - serviceName: "service-1", - accessors: []string{ - "2f89e357-dedb-8d8f-7f30-1f465a41508a", // serviceName + global - "5e5d6269-f933-3af2-fe30-259b050223f9", // serviceName + local - }, - }, - { - name: "All", - local: true, - global: true, - policy: "", - role: "", - methodName: "", - serviceName: "", + name: "All", + local: true, + global: true, + policy: "", + role: "", + methodName: "", accessors: []string{ acl.AnonymousTokenID, "211f0360-ef53-41d3-9d4d-db84396eb6c0", // authMethod + local - "2f89e357-dedb-8d8f-7f30-1f465a41508a", // templated policy: serviceName + global "47eea4da-bda1-48a6-901c-3e36d2d9262f", // policy + global "4915fc9d-3726-4171-b588-6c271f45eecd", // policy + local "54866514-3cf2-4fec-8a8a-710583831834", // mgmt + global - "5e5d6269-f933-3af2-fe30-259b050223f9", // templated policy: serviceName + local "74277ae1-6a9b-4035-b444-2370fe6a2cb5", // authMethod + global "80c900e1-2fc5-4685-ae29-1b2d17fc30e4", // serviceName + global "a14fa45e-0afe-4b44-961d-a430030ccfe2", // serviceName + local diff --git a/agent/consul/state/autopilot.go b/agent/consul/state/autopilot.go index 608f08f5215c2..472ce4bfc31c2 100644 --- a/agent/consul/state/autopilot.go +++ b/agent/consul/state/autopilot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/autopilot_test.go b/agent/consul/state/autopilot_test.go index a2877e2df5ffa..f26163bc4ec1b 100644 --- a/agent/consul/state/autopilot_test.go +++ b/agent/consul/state/autopilot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 8973381f2b08a..aa11754c297c5 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -1318,6 +1318,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, EnterpriseMeta: *entMeta, PeerName: peerName, }) + if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -1337,7 +1338,6 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) { continue } - // List all the services on the node services, err := catalogServiceListByNode(tx, n.Node, entMeta, n.PeerName, false) if err != nil { @@ -3557,7 +3557,7 @@ func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, en for _, svc := range gatewayServices { // If the service is a wildcard we need to target all services within the namespace if svc.Service.Name == structs.WildcardSpecifier { - if err := updateGatewayNamespace(tx, idx, svc, entMeta); err != nil { + if err := updateGatewayNamespace(tx, idx, svc, &svc.Service.EnterpriseMeta); err != nil { return fmt.Errorf("failed to associate gateway %q with wildcard: %v", gateway.String(), err) } // Skip service-specific update below if there was a wildcard update @@ -3988,7 +3988,7 @@ func updateGatewayService(tx WriteTxn, idx uint64, mapping *structs.GatewayServi } // checkWildcardForGatewaysAndUpdate checks whether a service matches a -// wildcard definition in gateway config entries and if so adds it the +// wildcard definition in gateway config entries and if so adds it the the // gateway-services table. func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.ServiceName, ns *structs.NodeService, kind structs.GatewayServiceKind) error { sn := structs.ServiceName{Name: structs.WildcardSpecifier, EnterpriseMeta: svc.EnterpriseMeta} @@ -4036,7 +4036,7 @@ func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.Servic } // checkGatewayAndUpdate checks whether a service matches a -// wildcard definition in gateway config entries and if so adds it the +// wildcard definition in gateway config entries and if so adds it the the // gateway-services table. func checkGatewayAndUpdate(tx WriteTxn, idx uint64, svc *structs.ServiceName, kind structs.GatewayServiceKind) error { sn := structs.ServiceName{Name: svc.Name, EnterpriseMeta: svc.EnterpriseMeta} @@ -4315,7 +4315,7 @@ func (s *Store) ServiceTopology( ws memdb.WatchSet, dc, service string, kind structs.ServiceKind, - defaultAllow bool, + defaultAllow acl.EnforcementDecision, entMeta *acl.EnterpriseMeta, ) (uint64, *structs.ServiceTopology, error) { tx := s.db.ReadTxn() @@ -4466,7 +4466,7 @@ func (s *Store) ServiceTopology( Partition: un.PartitionOrDefault(), Intentions: srcIntentions, MatchType: structs.IntentionMatchDestination, - DefaultAllow: defaultAllow, + DefaultDecision: defaultAllow, AllowPermissions: false, } decision, err := s.IntentionDecision(opts) @@ -4590,7 +4590,7 @@ func (s *Store) ServiceTopology( Partition: dn.PartitionOrDefault(), Intentions: dstIntentions, MatchType: structs.IntentionMatchSource, - DefaultAllow: defaultAllow, + DefaultDecision: defaultAllow, AllowPermissions: false, } decision, err := s.IntentionDecision(opts) diff --git a/agent/consul/state/catalog_ce.go b/agent/consul/state/catalog_ce.go index 8068b7f0700fc..bec9a6a619771 100644 --- a/agent/consul/state/catalog_ce.go +++ b/agent/consul/state/catalog_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/catalog_ce_test.go b/agent/consul/state/catalog_ce_test.go index d1050ecfa04c1..e8c71812f860f 100644 --- a/agent/consul/state/catalog_ce_test.go +++ b/agent/consul/state/catalog_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 7b2057c6f43a0..0cd7258d5e806 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -645,7 +645,7 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod } // newServiceHealthEventsForNode returns health events for all services on the -// given node. This mirrors some of the logic in the oddly-named +// given node. This mirrors some of the the logic in the oddly-named // parseCheckServiceNodes but is more efficient since we know they are all on // the same node. func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta, peerName string) ([]stream.Event, error) { diff --git a/agent/consul/state/catalog_events_ce.go b/agent/consul/state/catalog_events_ce.go index 5b1559b22f364..72e3993b5d56d 100644 --- a/agent/consul/state/catalog_events_ce.go +++ b/agent/consul/state/catalog_events_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/catalog_events_ce_test.go b/agent/consul/state/catalog_events_ce_test.go index 6945b5c47656e..0de8286c44c85 100644 --- a/agent/consul/state/catalog_events_ce_test.go +++ b/agent/consul/state/catalog_events_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 94406e34f9aee..46e0b269617fa 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/catalog_schema.deepcopy.go b/agent/consul/state/catalog_schema.deepcopy.go index af4d430d2f971..406a7fdce796f 100644 --- a/agent/consul/state/catalog_schema.deepcopy.go +++ b/agent/consul/state/catalog_schema.deepcopy.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - // generated by deep-copy -pointer-receiver -o ./catalog_schema.deepcopy.go -type upstreamDownstream ./; DO NOT EDIT. package state diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index b8da7c0999361..8702cc2e0cf5f 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index f18b9beae8433..7ca578307e2f2 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index e0fe9e73ec079..a19a78f8d90d6 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/config_entry_ce.go b/agent/consul/state/config_entry_ce.go index 1f70baf19bb88..ec01e0c09aaea 100644 --- a/agent/consul/state/config_entry_ce.go +++ b/agent/consul/state/config_entry_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/config_entry_ce_test.go b/agent/consul/state/config_entry_ce_test.go index 4b9103a74ede8..02fb3be78a2c5 100644 --- a/agent/consul/state/config_entry_ce_test.go +++ b/agent/consul/state/config_entry_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/config_entry_events.go b/agent/consul/state/config_entry_events.go index 6fda5e8e82f12..5681362dbe16e 100644 --- a/agent/consul/state/config_entry_events.go +++ b/agent/consul/state/config_entry_events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -28,7 +28,6 @@ var configEntryKindToTopic = map[string]stream.Topic{ structs.RateLimitIPConfig: EventTopicIPRateLimit, structs.SamenessGroup: EventTopicSamenessGroup, structs.JWTProvider: EventTopicJWTProvider, - structs.ExportedServices: EventTopicExportedServices, } // EventSubjectConfigEntry is a stream.Subject used to route and receive events @@ -177,12 +176,6 @@ func (s *Store) JWTProviderSnapshot(req stream.SubscribeRequest, buf stream.Snap return s.configEntrySnapshot(structs.JWTProvider, req, buf) } -// ExportedServicesSnapshot is a stream.SnapshotFunc that returns a snapshot of -// exported-services config entries. -func (s *Store) ExportedServicesSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - return s.configEntrySnapshot(structs.ExportedServices, req, buf) -} - func (s *Store) configEntrySnapshot(kind string, req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { var ( idx uint64 diff --git a/agent/consul/state/config_entry_events_test.go b/agent/consul/state/config_entry_events_test.go index e8ceb10f65d85..1ee92770bc65b 100644 --- a/agent/consul/state/config_entry_events_test.go +++ b/agent/consul/state/config_entry_events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/config_entry_exported_services.go b/agent/consul/state/config_entry_exported_services.go index 18d9553dc015a..b758adc09eafb 100644 --- a/agent/consul/state/config_entry_exported_services.go +++ b/agent/consul/state/config_entry_exported_services.go @@ -1,17 +1,14 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state import ( "fmt" - "sort" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/proto/private/pbconfigentry" "github.com/hashicorp/go-memdb" ) @@ -67,108 +64,3 @@ func getExportedServicesConfigEntryTxn( } return idx, export, nil } - -// ResolvedExportedServices returns the list of exported services along with consumers. -// Sameness Groups and wild card entries are resolved. -func (s *Store) ResolvedExportedServices(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, []*pbconfigentry.ResolvedExportedService, error) { - tx := s.db.ReadTxn() - defer tx.Abort() - - return resolvedExportedServicesTxn(tx, ws, entMeta) -} - -func resolvedExportedServicesTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, []*pbconfigentry.ResolvedExportedService, error) { - var resp []*pbconfigentry.ResolvedExportedService - - // getSimplifiedExportedServices resolves the sameness group information to partitions and peers. - maxIdx, exports, err := getSimplifiedExportedServices(tx, ws, nil, *entMeta) - if err != nil { - return 0, nil, err - } - if exports == nil { - return maxIdx, nil, nil - } - - var exportedServices []structs.ExportedService - - for _, svc := range exports.Services { - // Prevent exporting the "consul" service. - if svc.Name == structs.ConsulServiceName { - continue - } - - // If this isn't a wildcard, we can simply add it to the list of exportedServices and move to the next entry. - if svc.Name != structs.WildcardSpecifier { - exportedServices = append(exportedServices, svc) - continue - } - - svcEntMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace) - - // If all services in the namespace are exported by the wildcard, query those service names. - idx, typicalServices, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, svcEntMeta) - if err != nil { - return 0, nil, fmt.Errorf("failed to get typical service names: %w", err) - } - - maxIdx = lib.MaxUint64(maxIdx, idx) - - for _, sn := range typicalServices { - // Prevent exporting the "consul" service. - if sn.Service.Name != structs.ConsulServiceName { - exportedServices = append(exportedServices, structs.ExportedService{ - Name: sn.Service.Name, - Namespace: sn.Service.NamespaceOrDefault(), - Consumers: svc.Consumers, - }) - } - } - } - - uniqueExportedServices := getUniqueExportedServices(exportedServices, entMeta) - resp = prepareExportedServicesResponse(uniqueExportedServices, entMeta) - - return maxIdx, resp, nil -} - -// getUniqueExportedServices removes duplicate services and consumers. 
Services are also sorted in ascending order -func getUniqueExportedServices(exportedServices []structs.ExportedService, entMeta *acl.EnterpriseMeta) []structs.ExportedService { - // Services -> ServiceConsumers - var exportedServicesMapper = make(map[structs.ServiceName]map[structs.ServiceConsumer]struct{}) - for _, svc := range exportedServices { - svcEntMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace) - svcName := structs.NewServiceName(svc.Name, &svcEntMeta) - - for _, c := range svc.Consumers { - cons, ok := exportedServicesMapper[svcName] - if !ok { - cons = make(map[structs.ServiceConsumer]struct{}) - exportedServicesMapper[svcName] = cons - } - cons[c] = struct{}{} - } - } - - uniqueExportedServices := make([]structs.ExportedService, 0, len(exportedServicesMapper)) - - for svc, cons := range exportedServicesMapper { - consumers := make([]structs.ServiceConsumer, 0, len(cons)) - for con := range cons { - consumers = append(consumers, con) - } - - uniqueExportedServices = append(uniqueExportedServices, structs.ExportedService{ - Name: svc.Name, - Namespace: svc.NamespaceOrDefault(), - Consumers: consumers, - }) - - } - - sort.Slice(uniqueExportedServices, func(i, j int) bool { - return (uniqueExportedServices[i].Name < uniqueExportedServices[j].Name) || - (uniqueExportedServices[i].Name == uniqueExportedServices[j].Name && uniqueExportedServices[i].Namespace < uniqueExportedServices[j].Namespace) - }) - - return uniqueExportedServices -} diff --git a/agent/consul/state/config_entry_exported_services_ce.go b/agent/consul/state/config_entry_exported_services_ce.go index 3418270902eb0..9dfc4751d2d61 100644 --- a/agent/consul/state/config_entry_exported_services_ce.go +++ b/agent/consul/state/config_entry_exported_services_ce.go @@ -1,17 +1,15 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state import ( - "sort" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbconfigentry" "github.com/hashicorp/go-memdb" ) @@ -34,29 +32,3 @@ func (s *Store) GetSimplifiedExportedServices(ws memdb.WatchSet, entMeta acl.Ent defer tx.Abort() return getSimplifiedExportedServices(tx, ws, nil, entMeta) } - -func prepareExportedServicesResponse(exportedServices []structs.ExportedService, entMeta *acl.EnterpriseMeta) []*pbconfigentry.ResolvedExportedService { - - resp := make([]*pbconfigentry.ResolvedExportedService, len(exportedServices)) - - for idx, exportedService := range exportedServices { - consumerPeers := []string{} - - for _, consumer := range exportedService.Consumers { - if consumer.Peer != "" { - consumerPeers = append(consumerPeers, consumer.Peer) - } - } - - sort.Strings(consumerPeers) - - resp[idx] = &pbconfigentry.ResolvedExportedService{ - Service: exportedService.Name, - Consumers: &pbconfigentry.Consumers{ - Peers: consumerPeers, - }, - } - } - - return resp -} diff --git a/agent/consul/state/config_entry_exported_services_ce_test.go b/agent/consul/state/config_entry_exported_services_ce_test.go deleted file mode 100644 index a96e5cb3f9816..0000000000000 --- a/agent/consul/state/config_entry_exported_services_ce_test.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package state - -import ( - "testing" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/go-memdb" - "github.com/stretchr/testify/require" -) - -func TestStore_prepareExportedServicesResponse(t *testing.T) { - - exportedServices := []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "west", - }, - { - Peer: "east", - }, - { - Partition: "part", - }, - }, - }, - { - Name: "web", - Consumers: []structs.ServiceConsumer{ - { - Peer: "peer-a", - }, - { - Peer: "peer-b", - }, - }, - }, - } - - resp := prepareExportedServicesResponse(exportedServices, nil) - - expected := []*pbconfigentry.ResolvedExportedService{ - { - Service: "db", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"east", "west"}, - }, - }, - { - Service: "web", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"peer-a", "peer-b"}, - }, - }, - } - - require.Equal(t, expected, resp) -} - -func TestStore_ResolvedExportingServices(t *testing.T) { - s := NewStateStore(nil) - var c indexCounter - - { - require.NoError(t, s.EnsureNode(c.Next(), &structs.Node{ - Node: "foo", Address: "127.0.0.1", - })) - - require.NoError(t, s.EnsureService(c.Next(), "foo", &structs.NodeService{ - ID: "db", Service: "db", Port: 5000, - })) - - require.NoError(t, s.EnsureService(c.Next(), "foo", &structs.NodeService{ - ID: "cache", Service: "cache", Port: 5000, - })) - - entry := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - { - Peer: "west", - }, - }, - }, - { - Name: "cache", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - }, - }, - }, - } - err := s.EnsureConfigEntry(c.Next(), entry) - require.NoError(t, err) - - // Adding services to check wildcard config later on - - require.NoError(t, s.EnsureService(c.Next(), "foo", &structs.NodeService{ - ID: "frontend", Service: "frontend", Port: 5000, - })) - - require.NoError(t, s.EnsureService(c.Next(), "foo", &structs.NodeService{ - ID: "backend", Service: "backend", Port: 5000, - })) - - // The consul service should never be exported. 
- require.NoError(t, s.EnsureService(c.Next(), "foo", &structs.NodeService{ - ID: structs.ConsulServiceID, Service: structs.ConsulServiceName, Port: 8000, - })) - - } - - type testCase struct { - expect []*pbconfigentry.ResolvedExportedService - idx uint64 - } - - run := func(t *testing.T, tc testCase) { - ws := memdb.NewWatchSet() - defaultMeta := structs.DefaultEnterpriseMetaInDefaultPartition() - idx, services, err := s.ResolvedExportedServices(ws, defaultMeta) - require.NoError(t, err) - require.Equal(t, tc.idx, idx) - require.Equal(t, tc.expect, services) - } - - t.Run("only exported services are included", func(t *testing.T) { - tc := testCase{ - expect: []*pbconfigentry.ResolvedExportedService{ - { - Service: "cache", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"east"}, - }, - }, - { - Service: "db", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"east", "west"}, - }, - }, - }, - idx: 4, - } - - run(t, tc) - }) - - t.Run("wild card includes all services", func(t *testing.T) { - entry := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "*", - Consumers: []structs.ServiceConsumer{ - {Peer: "west"}, - }, - }, - }, - } - - err := s.EnsureConfigEntry(c.Next(), entry) - require.NoError(t, err) - - tc := testCase{ - expect: []*pbconfigentry.ResolvedExportedService{ - { - Service: "backend", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"west"}, - }, - }, - { - Service: "cache", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"west"}, - }, - }, - { - Service: "db", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"west"}, - }, - }, - - { - Service: "frontend", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"west"}, - }, - }, - }, - idx: c.Last(), - } - - run(t, tc) - }) - - t.Run("deleting the config entry clears the services", func(t *testing.T) { - defaultMeta := structs.DefaultEnterpriseMetaInDefaultPartition() - err := s.DeleteConfigEntry(c.Next(), structs.ExportedServices, "default", nil) - require.NoError(t, err) - - idx, result, err := s.ResolvedExportedServices(nil, defaultMeta) - require.NoError(t, err) - require.Equal(t, c.Last(), idx) - require.Nil(t, result) - }) -} - -func TestStore_getUniqueExportedServices(t *testing.T) { - - exportedServices := []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "west", - }, - { - Peer: "east", - }, - { - Partition: "part", - }, - }, - }, - { - Name: "web", - Consumers: []structs.ServiceConsumer{ - { - Peer: "peer-a", - }, - { - Peer: "peer-b", - }, - }, - }, - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "west", - }, - { - Peer: "west-2", - }, - }, - }, - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "west", - }, - { - Peer: "west-2", - }, - }, - }, - } - - resp := getUniqueExportedServices(exportedServices, nil) - - expected := []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "west", - }, - { - Peer: "east", - }, - { - Partition: "part", - }, - { - Peer: "west-2", - }, - }, - }, - { - Name: "web", - Consumers: []structs.ServiceConsumer{ - { - Peer: "peer-a", - }, - { - Peer: "peer-b", - }, - }, - }, - } - - require.Equal(t, 2, len(resp)) - - for idx, expSvc := range expected { - require.Equal(t, expSvc.Name, resp[idx].Name) - require.ElementsMatch(t, expSvc.Consumers, resp[idx].Consumers) - } -} diff --git a/agent/consul/state/config_entry_intention.go 
b/agent/consul/state/config_entry_intention.go index 459d8c4276e6f..301baf9c09093 100644 --- a/agent/consul/state/config_entry_intention.go +++ b/agent/consul/state/config_entry_intention.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/config_entry_intention_ce.go b/agent/consul/state/config_entry_intention_ce.go index 8cefdf2c3dffc..6d479f9ad6bfb 100644 --- a/agent/consul/state/config_entry_intention_ce.go +++ b/agent/consul/state/config_entry_intention_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/config_entry_sameness_group_ce.go b/agent/consul/state/config_entry_sameness_group_ce.go index fcff27158ba2e..16437cd8fd187 100644 --- a/agent/consul/state/config_entry_sameness_group_ce.go +++ b/agent/consul/state/config_entry_sameness_group_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/config_entry_sameness_group_ce_test.go b/agent/consul/state/config_entry_sameness_group_ce_test.go index 3d307238f3329..ce4aeb8394f8b 100644 --- a/agent/consul/state/config_entry_sameness_group_ce_test.go +++ b/agent/consul/state/config_entry_sameness_group_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/config_entry_schema.go b/agent/consul/state/config_entry_schema.go index c662415252966..e420d657cae8a 100644 --- a/agent/consul/state/config_entry_schema.go +++ b/agent/consul/state/config_entry_schema.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/config_entry_test.go b/agent/consul/state/config_entry_test.go index c1bd3e35deff0..05c2dd2bd5afb 100644 --- a/agent/consul/state/config_entry_test.go +++ b/agent/consul/state/config_entry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 4b1eeeab783d4..99e99637b6aa9 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/connect_ca_events.go b/agent/consul/state/connect_ca_events.go index a285b9d07cb75..554a867dcd59c 100644 --- a/agent/consul/state/connect_ca_events.go +++ b/agent/consul/state/connect_ca_events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/connect_ca_events_test.go b/agent/consul/state/connect_ca_events_test.go index 79df8df5be879..bf13eefcb9376 100644 --- a/agent/consul/state/connect_ca_events_test.go +++ b/agent/consul/state/connect_ca_events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index 2a49723d65cbc..124392cf1a432 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/coordinate.go b/agent/consul/state/coordinate.go index bcd71e5a0f08d..f2eb7b30425e6 100644 --- a/agent/consul/state/coordinate.go +++ b/agent/consul/state/coordinate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/coordinate_ce.go b/agent/consul/state/coordinate_ce.go index 000c3714c794b..17956e964eee6 100644 --- a/agent/consul/state/coordinate_ce.go +++ b/agent/consul/state/coordinate_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/coordinate_ce_test.go b/agent/consul/state/coordinate_ce_test.go index b80a7b8ae832f..a4608245060ea 100644 --- a/agent/consul/state/coordinate_ce_test.go +++ b/agent/consul/state/coordinate_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/coordinate_test.go b/agent/consul/state/coordinate_test.go index dad0ce3e32ec6..0fe582eab5aa9 100644 --- a/agent/consul/state/coordinate_test.go +++ b/agent/consul/state/coordinate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/delay_ce.go b/agent/consul/state/delay_ce.go index d0d2013cb0c8b..a2471ae636241 100644 --- a/agent/consul/state/delay_ce.go +++ b/agent/consul/state/delay_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/delay_test.go b/agent/consul/state/delay_test.go index 6a2d0fa80c2d6..40f1842efd679 100644 --- a/agent/consul/state/delay_test.go +++ b/agent/consul/state/delay_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/events.go b/agent/consul/state/events.go index 2505d9737feed..666dc60035d1e 100644 --- a/agent/consul/state/events.go +++ b/agent/consul/state/events.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -46,7 +46,7 @@ func PBToStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.E case EventTopicMeshConfig, EventTopicServiceResolver, EventTopicIngressGateway, EventTopicServiceIntentions, EventTopicServiceDefaults, EventTopicAPIGateway, EventTopicTCPRoute, EventTopicHTTPRoute, EventTopicJWTProvider, EventTopicInlineCertificate, - EventTopicBoundAPIGateway, EventTopicSamenessGroup, EventTopicExportedServices: + EventTopicBoundAPIGateway, EventTopicSamenessGroup: subject = EventSubjectConfigEntry{ Name: named.Key, EnterpriseMeta: &entMeta, diff --git a/agent/consul/state/events_test.go b/agent/consul/state/events_test.go index c2a4ad399d641..3da9a26549b2a 100644 --- a/agent/consul/state/events_test.go +++ b/agent/consul/state/events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/federation_state.go b/agent/consul/state/federation_state.go index a02a38ed3b532..556caa4b48549 100644 --- a/agent/consul/state/federation_state.go +++ b/agent/consul/state/federation_state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/graveyard.go b/agent/consul/state/graveyard.go index 45398584356cf..5b6a95dafbaa2 100644 --- a/agent/consul/state/graveyard.go +++ b/agent/consul/state/graveyard.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/graveyard_ce.go b/agent/consul/state/graveyard_ce.go index c39a8c335ec5c..963ed6632e5be 100644 --- a/agent/consul/state/graveyard_ce.go +++ b/agent/consul/state/graveyard_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/graveyard_test.go b/agent/consul/state/graveyard_test.go index 66aaaf92fb143..af50673e9e741 100644 --- a/agent/consul/state/graveyard_test.go +++ b/agent/consul/state/graveyard_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/index_connect_test.go b/agent/consul/state/index_connect_test.go index 7b5404b5b2a8e..a598fab68ac44 100644 --- a/agent/consul/state/index_connect_test.go +++ b/agent/consul/state/index_connect_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/indexer.go b/agent/consul/state/indexer.go index c752b3af55cfc..f360eb2befe97 100644 --- a/agent/consul/state/indexer.go +++ b/agent/consul/state/indexer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 12c79cad5c94a..4341590e4ec20 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -743,7 +743,7 @@ type IntentionDecisionOpts struct { Peer string Intentions structs.SimplifiedIntentions MatchType structs.IntentionMatchType - DefaultAllow bool + DefaultDecision acl.EnforcementDecision AllowPermissions bool } @@ -763,7 +763,7 @@ func (s *Store) IntentionDecision(opts IntentionDecisionOpts) (structs.Intention } resp := structs.IntentionDecisionSummary{ - DefaultAllow: opts.DefaultAllow, + DefaultAllow: opts.DefaultDecision == acl.Allow, } if ixnMatch == nil { // No intention found, fall back to default @@ -1029,13 +1029,13 @@ func (s *Store) IntentionTopology( ws memdb.WatchSet, target structs.ServiceName, downstreams bool, - defaultAllow bool, + defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType, ) (uint64, structs.ServiceList, error) { tx := s.db.ReadTxn() defer tx.Abort() - idx, services, err := s.intentionTopologyTxn(tx, ws, target, downstreams, defaultAllow, intentionTarget) + idx, services, err := s.intentionTopologyTxn(tx, ws, target, downstreams, defaultDecision, intentionTarget) if err != nil { requested := "upstreams" if downstreams { @@ -1055,7 +1055,7 @@ func (s *Store) intentionTopologyTxn( tx ReadTxn, ws memdb.WatchSet, target structs.ServiceName, downstreams bool, - defaultAllow bool, + defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType, ) (uint64, []ServiceWithDecision, error) { @@ -1163,7 +1163,7 @@ func (s *Store) intentionTopologyTxn( Partition: candidate.PartitionOrDefault(), Intentions: intentions, MatchType: decisionMatchType, - DefaultAllow: defaultAllow, + DefaultDecision: defaultDecision, AllowPermissions: true, } decision, err := s.IntentionDecision(opts) diff --git a/agent/consul/state/intention_ce.go b/agent/consul/state/intention_ce.go index 33e9bbe26184e..e82177eb1a563 100644 --- a/agent/consul/state/intention_ce.go +++ b/agent/consul/state/intention_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index b1c7ccf7b89c3..2fc54c6c86815 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
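The intention.go hunk above replaces the boolean DefaultAllow field and parameters with an acl.EnforcementDecision named DefaultDecision, and rebuilds the summary with DefaultAllow: opts.DefaultDecision == acl.Allow. Below is a minimal sketch of how the two representations map onto each other; only acl.EnforcementDecision, acl.Allow and acl.Deny come from the patch, while the package and helper names are invented for illustration.

```go
// Illustrative sketch only: bridging the boolean and EnforcementDecision forms of the
// intention default. Helper names are invented; they are not part of the patch.
package intentiondemo

import "github.com/hashicorp/consul/acl"

// decisionFromBool converts an agent-level "default allow" flag into the
// EnforcementDecision now carried by IntentionDecisionOpts and IntentionTopology.
func decisionFromBool(defaultAllow bool) acl.EnforcementDecision {
	if defaultAllow {
		return acl.Allow
	}
	return acl.Deny
}

// boolFromDecision is the inverse used when building IntentionDecisionSummary,
// mirroring `DefaultAllow: opts.DefaultDecision == acl.Allow` in the hunk above.
func boolFromDecision(d acl.EnforcementDecision) bool {
	return d == acl.Allow
}
```

Inside the store, the summary's DefaultAllow field is then just boolFromDecision(opts.DefaultDecision), which is exactly the expression the patch introduces.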
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -1966,27 +1966,27 @@ func TestStore_IntentionDecision(t *testing.T) { src string dst string matchType structs.IntentionMatchType - defaultAllow bool + defaultDecision acl.EnforcementDecision allowPermissions bool expect structs.IntentionDecisionSummary }{ { - name: "no matching intention and default deny", - src: "does-not-exist", - dst: "ditto", - matchType: structs.IntentionMatchDestination, - defaultAllow: false, + name: "no matching intention and default deny", + src: "does-not-exist", + dst: "ditto", + matchType: structs.IntentionMatchDestination, + defaultDecision: acl.Deny, expect: structs.IntentionDecisionSummary{ Allowed: false, DefaultAllow: false, }, }, { - name: "no matching intention and default allow", - src: "does-not-exist", - dst: "ditto", - matchType: structs.IntentionMatchDestination, - defaultAllow: true, + name: "no matching intention and default allow", + src: "does-not-exist", + dst: "ditto", + matchType: structs.IntentionMatchDestination, + defaultDecision: acl.Allow, expect: structs.IntentionDecisionSummary{ Allowed: true, DefaultAllow: true, @@ -2079,7 +2079,7 @@ func TestStore_IntentionDecision(t *testing.T) { Partition: acl.DefaultPartitionName, Intentions: intentions, MatchType: tc.matchType, - DefaultAllow: tc.defaultAllow, + DefaultDecision: tc.defaultDecision, AllowPermissions: tc.allowPermissions, } decision, err := s.IntentionDecision(opts) @@ -2161,7 +2161,7 @@ func TestStore_IntentionTopology(t *testing.T) { } tests := []struct { name string - defaultAllow bool + defaultDecision acl.EnforcementDecision intentions []structs.ServiceIntentionsConfigEntry discoveryChains []structs.ConfigEntry target structs.ServiceName @@ -2169,8 +2169,8 @@ func TestStore_IntentionTopology(t *testing.T) { expect expect }{ { - name: "(upstream) default allow all but intentions deny one", - defaultAllow: true, + name: "(upstream) acl allow all but intentions deny one", + defaultDecision: acl.Allow, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2196,8 +2196,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "(upstream) default allow includes virtual service", - defaultAllow: true, + name: "(upstream) acl allow includes virtual service", + defaultDecision: acl.Allow, discoveryChains: []structs.ConfigEntry{ &structs.ServiceResolverConfigEntry{ Kind: structs.ServiceResolver, @@ -2225,8 +2225,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "(upstream) default deny intentions allow virtual service", - defaultAllow: false, + name: "(upstream) acl deny all intentions allow virtual service", + defaultDecision: acl.Deny, discoveryChains: []structs.ConfigEntry{ &structs.ServiceResolverConfigEntry{ Kind: structs.ServiceResolver, @@ -2258,8 +2258,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "(upstream) default deny intentions allow one", - defaultAllow: false, + name: "(upstream) acl deny all intentions allow one", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2285,8 +2285,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "(downstream) default allow but intentions deny one", - defaultAllow: true, + name: "(downstream) acl allow all but intentions deny one", + defaultDecision: acl.Allow, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2316,8 +2316,8 @@ func 
TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "(downstream) default deny all intentions allow one", - defaultAllow: false, + name: "(downstream) acl deny all intentions allow one", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2343,8 +2343,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "default deny but intention allow all overrides it", - defaultAllow: false, + name: "acl deny but intention allow all overrides it", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2374,8 +2374,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "default allow but intention deny all overrides it", - defaultAllow: true, + name: "acl allow but intention deny all overrides it", + defaultDecision: acl.Allow, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2396,8 +2396,8 @@ func TestStore_IntentionTopology(t *testing.T) { }, }, { - name: "default deny but intention allow all overrides it", - defaultAllow: false, + name: "acl deny but intention allow all overrides it", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2448,7 +2448,7 @@ func TestStore_IntentionTopology(t *testing.T) { idx++ } - idx, got, err := s.IntentionTopology(nil, tt.target, tt.downstreams, tt.defaultAllow, structs.IntentionTargetService) + idx, got, err := s.IntentionTopology(nil, tt.target, tt.downstreams, tt.defaultDecision, structs.IntentionTargetService) require.NoError(t, err) require.Equal(t, tt.expect.idx, idx) @@ -2502,16 +2502,16 @@ func TestStore_IntentionTopology_Destination(t *testing.T) { services structs.ServiceList } tests := []struct { - name string - defaultAllow bool - intentions []structs.ServiceIntentionsConfigEntry - target structs.ServiceName - downstreams bool - expect expect + name string + defaultDecision acl.EnforcementDecision + intentions []structs.ServiceIntentionsConfigEntry + target structs.ServiceName + downstreams bool + expect expect }{ { - name: "(upstream) default allow all but intentions deny one, destination target", - defaultAllow: true, + name: "(upstream) acl allow all but intentions deny one, destination target", + defaultDecision: acl.Allow, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2537,8 +2537,8 @@ func TestStore_IntentionTopology_Destination(t *testing.T) { }, }, { - name: "(upstream) default deny intentions allow one, destination target", - defaultAllow: false, + name: "(upstream) acl deny all intentions allow one, destination target", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2564,8 +2564,8 @@ func TestStore_IntentionTopology_Destination(t *testing.T) { }, }, { - name: "(upstream) default deny check only destinations show, service target", - defaultAllow: false, + name: "(upstream) acl deny all check only destinations show, service target", + defaultDecision: acl.Deny, intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2586,8 +2586,8 @@ func TestStore_IntentionTopology_Destination(t *testing.T) { }, }, { - name: "(upstream) default allow check only destinations show, service target", - defaultAllow: true, + name: "(upstream) acl allow all check only destinations show, service target", + defaultDecision: acl.Allow, 
intentions: []structs.ServiceIntentionsConfigEntry{ { Kind: structs.ServiceIntentions, @@ -2638,7 +2638,7 @@ func TestStore_IntentionTopology_Destination(t *testing.T) { idx++ } - idx, got, err := s.IntentionTopology(nil, tt.target, tt.downstreams, tt.defaultAllow, structs.IntentionTargetDestination) + idx, got, err := s.IntentionTopology(nil, tt.target, tt.downstreams, tt.defaultDecision, structs.IntentionTargetDestination) require.NoError(t, err) require.Equal(t, tt.expect.idx, idx) @@ -2665,7 +2665,7 @@ func TestStore_IntentionTopology_Watches(t *testing.T) { target := structs.NewServiceName("web", structs.DefaultEnterpriseMetaInDefaultPartition()) ws := memdb.NewWatchSet() - index, got, err := s.IntentionTopology(ws, target, false, false, structs.IntentionTargetService) + index, got, err := s.IntentionTopology(ws, target, false, acl.Deny, structs.IntentionTargetService) require.NoError(t, err) require.Equal(t, uint64(0), index) require.Empty(t, got) @@ -2687,7 +2687,7 @@ func TestStore_IntentionTopology_Watches(t *testing.T) { // Reset the WatchSet ws = memdb.NewWatchSet() - index, got, err = s.IntentionTopology(ws, target, false, false, structs.IntentionTargetService) + index, got, err = s.IntentionTopology(ws, target, false, acl.Deny, structs.IntentionTargetService) require.NoError(t, err) require.Equal(t, uint64(2), index) // Because API is a virtual service, it is included in this output. @@ -2709,7 +2709,7 @@ func TestStore_IntentionTopology_Watches(t *testing.T) { // require.False(t, watchFired(ws)) // Result should not have changed - index, got, err = s.IntentionTopology(ws, target, false, false, structs.IntentionTargetService) + index, got, err = s.IntentionTopology(ws, target, false, acl.Deny, structs.IntentionTargetService) require.NoError(t, err) require.Equal(t, uint64(3), index) require.Equal(t, structs.ServiceList{structs.NewServiceName("api", nil)}, got) @@ -2724,7 +2724,7 @@ func TestStore_IntentionTopology_Watches(t *testing.T) { require.True(t, watchFired(ws)) // Reset the WatchSet - index, got, err = s.IntentionTopology(nil, target, false, false, structs.IntentionTargetService) + index, got, err = s.IntentionTopology(nil, target, false, acl.Deny, structs.IntentionTargetService) require.NoError(t, err) require.Equal(t, uint64(4), index) diff --git a/agent/consul/state/kvs.go b/agent/consul/state/kvs.go index b0b4f6c1e52dc..0d0a419ae5c29 100644 --- a/agent/consul/state/kvs.go +++ b/agent/consul/state/kvs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/kvs_ce.go b/agent/consul/state/kvs_ce.go index 69d4c28ad2a4e..10528e3be6dbb 100644 --- a/agent/consul/state/kvs_ce.go +++ b/agent/consul/state/kvs_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/kvs_ce_test.go b/agent/consul/state/kvs_ce_test.go index 30e7b22e40d9c..adf41fe7dbe86 100644 --- a/agent/consul/state/kvs_ce_test.go +++ b/agent/consul/state/kvs_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/kvs_test.go b/agent/consul/state/kvs_test.go index b85a08f98d181..4ced02586f0c0 100644 --- a/agent/consul/state/kvs_test.go +++ b/agent/consul/state/kvs_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 653befe823a32..0a3b66c6a6275 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -212,7 +212,6 @@ var ( EventTopicIPRateLimit = pbsubscribe.Topic_IPRateLimit EventTopicSamenessGroup = pbsubscribe.Topic_SamenessGroup EventTopicJWTProvider = pbsubscribe.Topic_JWTProvider - EventTopicExportedServices = pbsubscribe.Topic_ExportedServices ) func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { diff --git a/agent/consul/state/memdb_test.go b/agent/consul/state/memdb_test.go index e603fc5bb1691..7e893619be5de 100644 --- a/agent/consul/state/memdb_test.go +++ b/agent/consul/state/memdb_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/operations_ce.go b/agent/consul/state/operations_ce.go index 00469d6b0c95d..08de08015b449 100644 --- a/agent/consul/state/operations_ce.go +++ b/agent/consul/state/operations_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go index 05dfa59a37af3..90db748458847 100644 --- a/agent/consul/state/peering.go +++ b/agent/consul/state/peering.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state @@ -202,9 +202,6 @@ func (s *Store) peeringSecretsWriteTxn(tx WriteTxn, req *pbpeering.SecretsWriteR return fmt.Errorf("failed to read peering by id: %w", err) } if peering == nil { - if structs.CEDowngrade { - return nil - } return fmt.Errorf("unknown peering %q for secret", req.PeerID) } diff --git a/agent/consul/state/peering_ce.go b/agent/consul/state/peering_ce.go index 72082689486e8..a54e2d37ddffb 100644 --- a/agent/consul/state/peering_ce.go +++ b/agent/consul/state/peering_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/peering_ce_test.go b/agent/consul/state/peering_ce_test.go index 88546c17855ed..41d1bce452db0 100644 --- a/agent/consul/state/peering_ce_test.go +++ b/agent/consul/state/peering_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go index 764286bb77b8e..8125b96860aac 100644 --- a/agent/consul/state/peering_test.go +++ b/agent/consul/state/peering_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/prepared_query.go b/agent/consul/state/prepared_query.go index 62cf39588d17a..7638d925170f0 100644 --- a/agent/consul/state/prepared_query.go +++ b/agent/consul/state/prepared_query.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/prepared_query_index.go b/agent/consul/state/prepared_query_index.go index 83bb5dc738251..ac76846366699 100644 --- a/agent/consul/state/prepared_query_index.go +++ b/agent/consul/state/prepared_query_index.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/prepared_query_index_test.go b/agent/consul/state/prepared_query_index_test.go index aaaa62692f1cf..a486047f57e33 100644 --- a/agent/consul/state/prepared_query_index_test.go +++ b/agent/consul/state/prepared_query_index_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/prepared_query_test.go b/agent/consul/state/prepared_query_test.go index dc902de4ad309..f0b0cd95f4469 100644 --- a/agent/consul/state/prepared_query_test.go +++ b/agent/consul/state/prepared_query_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/query.go b/agent/consul/state/query.go index 288e715e83314..2256aab995fb0 100644 --- a/agent/consul/state/query.go +++ b/agent/consul/state/query.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/query_ce.go b/agent/consul/state/query_ce.go index 809f35279987c..98108d6ff02b2 100644 --- a/agent/consul/state/query_ce.go +++ b/agent/consul/state/query_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go index 0934ca483e5eb..9e0e6db2fee42 100644 --- a/agent/consul/state/schema.go +++ b/agent/consul/state/schema.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/schema_ce.go b/agent/consul/state/schema_ce.go index 7dce71a038f99..eecde09aab619 100644 --- a/agent/consul/state/schema_ce.go +++ b/agent/consul/state/schema_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/schema_ce_test.go b/agent/consul/state/schema_ce_test.go index 9ed597acf6562..55fc3ee54c194 100644 --- a/agent/consul/state/schema_ce_test.go +++ b/agent/consul/state/schema_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/schema_test.go b/agent/consul/state/schema_test.go index a0af2223e27ce..f67b18e8c3beb 100644 --- a/agent/consul/state/schema_test.go +++ b/agent/consul/state/schema_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/session.go b/agent/consul/state/session.go index d57b05947d396..5e666f80fd894 100644 --- a/agent/consul/state/session.go +++ b/agent/consul/state/session.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/session_ce.go b/agent/consul/state/session_ce.go index 2fa24878541cb..1854fb3e1448b 100644 --- a/agent/consul/state/session_ce.go +++ b/agent/consul/state/session_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/session_test.go b/agent/consul/state/session_test.go index 08f7ad09d0c1f..eab4299581637 100644 --- a/agent/consul/state/session_test.go +++ b/agent/consul/state/session_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index dff3441535bb5..fce3b3c96155f 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/state_store_ce_test.go b/agent/consul/state/state_store_ce_test.go index ca57cf1aface8..5515b193f688c 100644 --- a/agent/consul/state/state_store_ce_test.go +++ b/agent/consul/state/state_store_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index 751ecee779dee..587f15c03d948 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index 25a91c558646f..9395aa1cb1820 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/system_metadata.go b/agent/consul/state/system_metadata.go index 06e2d3cc598b9..ed802efbd1cd8 100644 --- a/agent/consul/state/system_metadata.go +++ b/agent/consul/state/system_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/system_metadata_test.go b/agent/consul/state/system_metadata_test.go index c2ac97b390b8c..59f8bcd30c3eb 100644 --- a/agent/consul/state/system_metadata_test.go +++ b/agent/consul/state/system_metadata_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/tombstone_gc.go b/agent/consul/state/tombstone_gc.go index 6eab5b6b5ba91..3fc19c5cd9cc8 100644 --- a/agent/consul/state/tombstone_gc.go +++ b/agent/consul/state/tombstone_gc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/tombstone_gc_test.go b/agent/consul/state/tombstone_gc_test.go index def4c9af19720..d0fd11fa7c5a6 100644 --- a/agent/consul/state/tombstone_gc_test.go +++ b/agent/consul/state/tombstone_gc_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/txn.go b/agent/consul/state/txn.go index 30189fc1ed602..81d8acd039291 100644 --- a/agent/consul/state/txn.go +++ b/agent/consul/state/txn.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/txn_test.go b/agent/consul/state/txn_test.go index bda004a63a3b6..a128badf42e52 100644 --- a/agent/consul/state/txn_test.go +++ b/agent/consul/state/txn_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/usage.go b/agent/consul/state/usage.go index 20515e2e7b0cd..0893d25288b68 100644 --- a/agent/consul/state/usage.go +++ b/agent/consul/state/usage.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/state/usage_ce.go b/agent/consul/state/usage_ce.go index 679d114232d07..1824cf12399a5 100644 --- a/agent/consul/state/usage_ce.go +++ b/agent/consul/state/usage_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package state diff --git a/agent/consul/state/usage_test.go b/agent/consul/state/usage_test.go index 4195779b8c037..68844ebc1140d 100644 --- a/agent/consul/state/usage_test.go +++ b/agent/consul/state/usage_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package state diff --git a/agent/consul/stats_fetcher.go b/agent/consul/stats_fetcher.go index 94e122f2b4389..d52930e85add3 100644 --- a/agent/consul/stats_fetcher.go +++ b/agent/consul/stats_fetcher.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/stats_fetcher_test.go b/agent/consul/stats_fetcher_test.go index 8dc9ce9eb2880..783424393a79e 100644 --- a/agent/consul/stats_fetcher_test.go +++ b/agent/consul/stats_fetcher_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/status_endpoint.go b/agent/consul/status_endpoint.go index bca454e25eb0f..efa2fa2cf4fa6 100644 --- a/agent/consul/status_endpoint.go +++ b/agent/consul/status_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/status_endpoint_test.go b/agent/consul/status_endpoint_test.go index d4cf5cb7798b9..6d95d7f6fd48d 100644 --- a/agent/consul/status_endpoint_test.go +++ b/agent/consul/status_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/stream/event.go b/agent/consul/stream/event.go index df8160d2dd366..db6f3a6312f38 100644 --- a/agent/consul/stream/event.go +++ b/agent/consul/stream/event.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 /* Package stream provides a publish/subscribe system for events produced by changes diff --git a/agent/consul/stream/event_buffer.go b/agent/consul/stream/event_buffer.go index 08060306e8d64..1c7f8c2b956a5 100644 --- a/agent/consul/stream/event_buffer.go +++ b/agent/consul/stream/event_buffer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/event_buffer_test.go b/agent/consul/stream/event_buffer_test.go index 892a14d733e1a..b6ec48e1775e1 100644 --- a/agent/consul/stream/event_buffer_test.go +++ b/agent/consul/stream/event_buffer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/event_publisher.go b/agent/consul/stream/event_publisher.go index 04aa08334b25b..f39ea22869a08 100644 --- a/agent/consul/stream/event_publisher.go +++ b/agent/consul/stream/event_publisher.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream @@ -23,7 +23,7 @@ type EventPublisher struct { // seconds. snapCacheTTL time.Duration - // This lock protects the snapCache, topicBuffers, snapshotHandlers, and topicBuffer.refs. + // This lock protects the snapCache, topicBuffers and topicBuffer.refs. lock sync.RWMutex // topicBuffers stores the head of the linked-list buffers to publish events to @@ -116,18 +116,16 @@ func NewEventPublisher(snapCacheTTL time.Duration) *EventPublisher { } // RegisterHandler will register a new snapshot handler function. The expectation is -// that all handlers get registered prior to the event publisher being Run. Passing -// supportsWildcard allows consumers to subscribe to events on this topic with *any* -// subject (by requesting SubjectWildcard) but this must be supported by the handler -// function. +// that all handlers get registered prior to the event publisher being Run. Handler +// registration is therefore not concurrency safe and access to handlers is internally +// not synchronized. Passing supportsWildcard allows consumers to subscribe to events +// on this topic with *any* subject (by requesting SubjectWildcard) but this must be +// supported by the handler function. 
func (e *EventPublisher) RegisterHandler(topic Topic, handler SnapshotFunc, supportsWildcard bool) error { if topic.String() == "" { return fmt.Errorf("the topic cannnot be empty") } - e.lock.Lock() - defer e.lock.Unlock() - if _, found := e.snapshotHandlers[topic]; found { return fmt.Errorf("a handler is already registered for the topic: %s", topic.String()) } @@ -144,33 +142,12 @@ func (e *EventPublisher) RegisterHandler(topic Topic, handler SnapshotFunc, supp return nil } -func (e *EventPublisher) RefreshAllTopics() { - topics := make(map[Topic]struct{}) - - e.lock.Lock() - for topic := range e.snapshotHandlers { - topics[topic] = struct{}{} - e.forceEvictByTopicLocked(topic) - } - e.lock.Unlock() - - for topic := range topics { - e.subscriptions.closeAllByTopic(topic) - } -} - func (e *EventPublisher) RefreshTopic(topic Topic) error { - e.lock.Lock() - _, found := e.snapshotHandlers[topic] - e.lock.Unlock() - - if !found { + if _, found := e.snapshotHandlers[topic]; !found { return fmt.Errorf("topic %s is not registered", topic) } - e.lock.Lock() - e.forceEvictByTopicLocked(topic) - e.lock.Unlock() + e.forceEvictByTopic(topic) e.subscriptions.closeAllByTopic(topic) return nil @@ -461,12 +438,14 @@ func (e *EventPublisher) setCachedSnapshotLocked(req *SubscribeRequest, snap *ev }) } -// forceEvictByTopicLocked will remove all entries from the snapshot cache for a given topic. -// This method should be called while holding the EventPublisher's lock. -func (e *EventPublisher) forceEvictByTopicLocked(topic Topic) { +// forceEvictByTopic will remove all entries from the snapshot cache for a given topic. +// This method should be called while holding the publishers lock. +func (e *EventPublisher) forceEvictByTopic(topic Topic) { + e.lock.Lock() for key := range e.snapCache { if key.Topic == topic.String() { delete(e.snapCache, key) } } + e.lock.Unlock() } diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index 09e3bc70911df..13efd0fb564ab 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
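The EventPublisher hunks above remove the locking from RegisterHandler and RefreshTopic and instead rely on every handler being registered before the publisher starts, while forceEvictByTopic now takes the publisher lock itself. The sketch below shows that register-then-run usage pattern as if it lived in package stream; strTopic, the handler body and the assumption that Run takes a context are placeholders for illustration, while the RegisterHandler signature and the snapshot-handler shape come from the surrounding diff.

```go
// Hedged sketch of the registration discipline described in the diff above.
package stream

import (
	"context"
	"time"
)

// strTopic is a throwaway Topic implementation used only for this example.
type strTopic string

func (s strTopic) String() string { return string(s) }

func registerBeforeRun(ctx context.Context) error {
	publisher := NewEventPublisher(10 * time.Second)

	// Register all snapshot handlers up front; per the comment in the patch, registration
	// is not concurrency safe, so it must not race with a running publisher.
	err := publisher.RegisterHandler(strTopic("example"),
		func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) {
			// Append the initial snapshot for the requested subject and return the index
			// it was read at; returning 0 here keeps the placeholder compilable.
			return 0, nil
		},
		false, // supportsWildcard: this handler does not serve SubjectWildcard requests
	)
	if err != nil {
		return err
	}

	// Start the event loop only after registration is complete. Run taking a context is
	// an assumption of this sketch, not something shown in the hunks above.
	go publisher.Run(ctx)
	return nil
}
```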
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream @@ -210,11 +210,9 @@ func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { require.NoError(t, err) defer sub.Unsubscribe() - publisher.lock.Lock() publisher.snapshotHandlers[testTopic] = func(_ SubscribeRequest, _ SnapshotAppender) (uint64, error) { return 0, fmt.Errorf("error should not be seen, cache should have been used") } - publisher.lock.Unlock() sub, err = publisher.Subscribe(req) require.NoError(t, err) @@ -396,11 +394,9 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testin publisher.publishEvent([]Event{nextEvent}) }) - publisher.lock.Lock() publisher.snapshotHandlers[testTopic] = func(_ SubscribeRequest, _ SnapshotAppender) (uint64, error) { return 0, fmt.Errorf("error should not be seen, cache should have been used") } - publisher.lock.Unlock() testutil.RunStep(t, "resume the subscription", func(t *testing.T) { newReq := *req @@ -480,11 +476,9 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot_WithCache(t *testi require.Equal(t, uint64(3), next.Index) }) - publisher.lock.Lock() publisher.snapshotHandlers[testTopic] = func(_ SubscribeRequest, _ SnapshotAppender) (uint64, error) { return 0, fmt.Errorf("error should not be seen, cache should have been used") } - publisher.lock.Unlock() testutil.RunStep(t, "resume the subscription", func(t *testing.T) { newReq := *req diff --git a/agent/consul/stream/event_snapshot.go b/agent/consul/stream/event_snapshot.go index 40c9f3d007d50..6b4b693689b42 100644 --- a/agent/consul/stream/event_snapshot.go +++ b/agent/consul/stream/event_snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/event_snapshot_test.go b/agent/consul/stream/event_snapshot_test.go index 8a6d4e27c6bf5..0888b90c39c9c 100644 --- a/agent/consul/stream/event_snapshot_test.go +++ b/agent/consul/stream/event_snapshot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/event_test.go b/agent/consul/stream/event_test.go index 22afe390de9c7..ff6f07c10ea83 100644 --- a/agent/consul/stream/event_test.go +++ b/agent/consul/stream/event_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/noop.go b/agent/consul/stream/noop.go index 65fcbb3fb7770..1cd35ea922eb8 100644 --- a/agent/consul/stream/noop.go +++ b/agent/consul/stream/noop.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/string_types.go b/agent/consul/stream/string_types.go index 2d0cb656777d3..6e6c4ef8e92fa 100644 --- a/agent/consul/stream/string_types.go +++ b/agent/consul/stream/string_types.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/subscription.go b/agent/consul/stream/subscription.go index 23911eff2e657..40286768abe02 100644 --- a/agent/consul/stream/subscription.go +++ b/agent/consul/stream/subscription.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/stream/subscription_test.go b/agent/consul/stream/subscription_test.go index fd4af464ee532..9bf0b95b5d564 100644 --- a/agent/consul/stream/subscription_test.go +++ b/agent/consul/stream/subscription_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package stream diff --git a/agent/consul/subscribe_backend.go b/agent/consul/subscribe_backend.go index c73dea18136af..9afcd4fc567df 100644 --- a/agent/consul/subscribe_backend.go +++ b/agent/consul/subscribe_backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/subscribe_backend_test.go b/agent/consul/subscribe_backend_test.go index 8d0f7a501ca1e..833f049c9728a 100644 --- a/agent/consul/subscribe_backend_test.go +++ b/agent/consul/subscribe_backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/system_metadata.go b/agent/consul/system_metadata.go index f110255aa2e09..40185294b8d77 100644 --- a/agent/consul/system_metadata.go +++ b/agent/consul/system_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/system_metadata_test.go b/agent/consul/system_metadata_test.go index 75e69786b8dd5..7c4eb30e4732e 100644 --- a/agent/consul/system_metadata_test.go +++ b/agent/consul/system_metadata_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/tenancy_bridge.go b/agent/consul/tenancy_bridge.go deleted file mode 100644 index 4e8daa0bc8def..0000000000000 --- a/agent/consul/tenancy_bridge.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import "github.com/hashicorp/consul/agent/grpc-external/services/resource" - -// V1TenancyBridge is used by the resource service to access V1 implementations of -// partitions and namespaces. This bridge will be removed when V2 implemenations -// of partitions and namespaces are available. -type V1TenancyBridge struct { - server *Server -} - -func NewV1TenancyBridge(server *Server) resource.TenancyBridge { - return &V1TenancyBridge{server: server} -} diff --git a/agent/consul/tenancy_bridge_ce.go b/agent/consul/tenancy_bridge_ce.go deleted file mode 100644 index f2938b156f026..0000000000000 --- a/agent/consul/tenancy_bridge_ce.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
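The deleted tenancy_bridge.go above wires a V1TenancyBridge into the resource service, and the CE implementation removed in the next hunk answers every check as if only the default partition and namespace exist. The sketch below restates that contract as a small interface plus implementation; the interface is reconstructed for illustration from those CE methods and is not the actual resource.TenancyBridge definition.

```go
// Hedged sketch, not Consul code: the tenancy-bridge contract implied by the deleted
// CE implementation. The interface body is reconstructed for illustration only.
package tenancysketch

type tenancyBridge interface {
	PartitionExists(partition string) (bool, error)
	IsPartitionMarkedForDeletion(partition string) (bool, error)
	NamespaceExists(partition, namespace string) (bool, error)
	IsNamespaceMarkedForDeletion(partition, namespace string) (bool, error)
}

// defaultOnlyBridge mirrors the CE behaviour: only the "default" partition and the
// "default" namespace inside it are reported as existing, and nothing is ever marked
// for deletion.
type defaultOnlyBridge struct{}

func (defaultOnlyBridge) PartitionExists(partition string) (bool, error) {
	return partition == "default", nil
}

func (defaultOnlyBridge) IsPartitionMarkedForDeletion(string) (bool, error) { return false, nil }

func (defaultOnlyBridge) NamespaceExists(partition, namespace string) (bool, error) {
	return partition == "default" && namespace == "default", nil
}

func (defaultOnlyBridge) IsNamespaceMarkedForDeletion(string, string) (bool, error) {
	return false, nil
}

var _ tenancyBridge = defaultOnlyBridge{}
```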
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package consul - -func (b *V1TenancyBridge) PartitionExists(partition string) (bool, error) { - if partition == "default" { - return true, nil - } - return false, nil -} - -func (b *V1TenancyBridge) IsPartitionMarkedForDeletion(partition string) (bool, error) { - return false, nil -} - -func (b *V1TenancyBridge) NamespaceExists(partition, namespace string) (bool, error) { - if partition == "default" && namespace == "default" { - return true, nil - } - return false, nil -} - -func (b *V1TenancyBridge) IsNamespaceMarkedForDeletion(partition, namespace string) (bool, error) { - return false, nil -} diff --git a/agent/consul/testdata/v2-resource-dependencies.md b/agent/consul/testdata/v2-resource-dependencies.md deleted file mode 100644 index e394247866a42..0000000000000 --- a/agent/consul/testdata/v2-resource-dependencies.md +++ /dev/null @@ -1,68 +0,0 @@ -```mermaid -flowchart TD - auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/namespacetrafficpermissions - auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/partitiontrafficpermissions - auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/trafficpermissions - auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/workloadidentity - auth/v2beta1/namespacetrafficpermissions - auth/v2beta1/partitiontrafficpermissions - auth/v2beta1/trafficpermissions - auth/v2beta1/workloadidentity - catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/failoverpolicy - catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/service - catalog/v2beta1/failoverpolicy - catalog/v2beta1/healthstatus - catalog/v2beta1/node --> catalog/v2beta1/nodehealthstatus - catalog/v2beta1/nodehealthstatus - catalog/v2beta1/service - catalog/v2beta1/serviceendpoints --> catalog/v2beta1/service - catalog/v2beta1/serviceendpoints --> catalog/v2beta1/workload - catalog/v2beta1/workload --> catalog/v2beta1/healthstatus - catalog/v2beta1/workload --> catalog/v2beta1/node - demo/v1/album - demo/v1/artist - demo/v1/concept - demo/v1/executive - demo/v1/recordlabel - demo/v2/album - demo/v2/artist - hcp/v2/link - hcp/v2/telemetrystate --> hcp/v2/link - internal/v1/tombstone - mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/service - mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/workload - mesh/v2beta1/computedexplicitdestinations --> mesh/v2beta1/computedroutes - mesh/v2beta1/computedexplicitdestinations --> mesh/v2beta1/destinations - mesh/v2beta1/computedproxyconfiguration --> catalog/v2beta1/workload - mesh/v2beta1/computedproxyconfiguration --> mesh/v2beta1/proxyconfiguration - mesh/v2beta1/computedroutes --> catalog/v2beta1/computedfailoverpolicy - mesh/v2beta1/computedroutes --> catalog/v2beta1/service - mesh/v2beta1/computedroutes --> mesh/v2beta1/destinationpolicy - mesh/v2beta1/computedroutes --> mesh/v2beta1/grpcroute - mesh/v2beta1/computedroutes --> mesh/v2beta1/httproute - mesh/v2beta1/computedroutes --> mesh/v2beta1/tcproute - mesh/v2beta1/destinationpolicy - mesh/v2beta1/destinations - mesh/v2beta1/grpcroute - mesh/v2beta1/httproute - mesh/v2beta1/meshconfiguration - mesh/v2beta1/meshgateway - mesh/v2beta1/proxyconfiguration - mesh/v2beta1/proxystatetemplate --> auth/v2beta1/computedtrafficpermissions - mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/service - mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/serviceendpoints - mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/workload - mesh/v2beta1/proxystatetemplate --> 
mesh/v2beta1/computedexplicitdestinations - mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedproxyconfiguration - mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedroutes - mesh/v2beta1/proxystatetemplate --> multicluster/v2/computedexportedservices - mesh/v2beta1/tcproute - multicluster/v2/computedexportedservices --> catalog/v2beta1/service - multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices - multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices - multicluster/v2/computedexportedservices --> multicluster/v2/partitionexportedservices - multicluster/v2/exportedservices - multicluster/v2/namespaceexportedservices - multicluster/v2/partitionexportedservices - tenancy/v2beta1/namespace -``` \ No newline at end of file diff --git a/agent/consul/txn_endpoint.go b/agent/consul/txn_endpoint.go index f39cd502cb170..e7e5d870875e9 100644 --- a/agent/consul/txn_endpoint.go +++ b/agent/consul/txn_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index ef2ecd13a3f85..f5654fdc001db 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/type_registry.go b/agent/consul/type_registry.go deleted file mode 100644 index 450cef7e059a9..0000000000000 --- a/agent/consul/type_registry.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "github.com/hashicorp/consul/internal/auth" - "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/hcp" - "github.com/hashicorp/consul/internal/mesh" - "github.com/hashicorp/consul/internal/multicluster" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/resource/demo" - "github.com/hashicorp/consul/internal/tenancy" -) - -// NewTypeRegistry returns a registry populated with all supported resource -// types. -// -// Note: the registry includes resource types that may not be suitable for -// production use (e.g. experimental or development resource types) because -// it is used in the CLI, where feature flags and other runtime configuration -// may not be available. -func NewTypeRegistry() resource.Registry { - registry := resource.NewRegistry() - - demo.RegisterTypes(registry) - mesh.RegisterTypes(registry) - catalog.RegisterTypes(registry) - auth.RegisterTypes(registry) - tenancy.RegisterTypes(registry) - multicluster.RegisterTypes(registry) - hcp.RegisterTypes(registry) - - return registry -} diff --git a/agent/consul/usagemetrics/usagemetrics.go b/agent/consul/usagemetrics/usagemetrics.go index 9539a743bbe69..eee4a8da06d48 100644 --- a/agent/consul/usagemetrics/usagemetrics.go +++ b/agent/consul/usagemetrics/usagemetrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package usagemetrics diff --git a/agent/consul/usagemetrics/usagemetrics_ce.go b/agent/consul/usagemetrics/usagemetrics_ce.go index 71457cbb48a1b..17853b4fcf6e3 100644 --- a/agent/consul/usagemetrics/usagemetrics_ce.go +++ b/agent/consul/usagemetrics/usagemetrics_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package usagemetrics diff --git a/agent/consul/usagemetrics/usagemetrics_ce_test.go b/agent/consul/usagemetrics/usagemetrics_ce_test.go index d0b7587d068c1..01a8b9eab5b8d 100644 --- a/agent/consul/usagemetrics/usagemetrics_ce_test.go +++ b/agent/consul/usagemetrics/usagemetrics_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package usagemetrics diff --git a/agent/consul/usagemetrics/usagemetrics_test.go b/agent/consul/usagemetrics/usagemetrics_test.go index 4e48beb0a3b2e..5aea3588f0558 100644 --- a/agent/consul/usagemetrics/usagemetrics_test.go +++ b/agent/consul/usagemetrics/usagemetrics_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package usagemetrics diff --git a/agent/consul/util.go b/agent/consul/util.go index b5827f659a38c..6fd6c77da33b1 100644 --- a/agent/consul/util.go +++ b/agent/consul/util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul @@ -11,9 +11,7 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/serf/serf" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/agent/structs" ) // CanServersUnderstandProtocol checks to see if all the servers in the given @@ -173,14 +171,3 @@ func isSerfMember(s *serf.Serf, nodeName string) bool { } return false } - -func DefaultIntentionAllow(authz acl.Authorizer, defaultIntentionPolicy string) bool { - // The default intention policy inherits from ACLs but - // is overridden by the agent's DefaultIntentionPolicy. - //nolint:staticcheck - defaultAllow := authz.IntentionDefaultAllow(nil) == acl.Allow - if defaultIntentionPolicy != "" { - defaultAllow = defaultIntentionPolicy == structs.IntentionDefaultPolicyAllow - } - return defaultAllow -} diff --git a/agent/consul/util_test.go b/agent/consul/util_test.go index d0a4fcb6842f1..c41b7748919fa 100644 --- a/agent/consul/util_test.go +++ b/agent/consul/util_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package consul diff --git a/agent/consul/v2_config_entry_exports_shim.go b/agent/consul/v2_config_entry_exports_shim.go deleted file mode 100644 index 8b2ddaf3a7062..0000000000000 --- a/agent/consul/v2_config_entry_exports_shim.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/consul/controller/queue" - "github.com/hashicorp/consul/agent/consul/state" - "github.com/hashicorp/consul/agent/consul/stream" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/logging" - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/go-hclog" -) - -type v1ServiceExportsShim struct { - s *Server - eventCh chan controller.Event -} - -func NewExportedServicesShim(s *Server) *v1ServiceExportsShim { - eventCh := make(chan controller.Event) - return &v1ServiceExportsShim{ - s: s, - eventCh: eventCh, - } -} - -func (s *v1ServiceExportsShim) Start(ctx context.Context) { - logger := s.s.logger.Named(logging.V2ExportsShim) - - // TODO replace this with a proper supervisor. - for ctx.Err() == nil { - err := subscribeToExportedServicesEvents(ctx, logger, s.s.publisher, s.eventCh) - - if err != nil { - logger.Warn("encountered an error while streaming exported services", "error", err) - select { - case <-time.After(time.Second): - case <-ctx.Done(): - return - } - } else { - return - } - } -} - -func subscribeToExportedServicesEvents(ctx context.Context, logger hclog.Logger, publisher *stream.EventPublisher, eventCh chan controller.Event) error { - subscription, err := publisher.Subscribe(&stream.SubscribeRequest{ - Topic: state.EventTopicExportedServices, - Subject: stream.SubjectWildcard, - }) - if err != nil { - return err - } - defer subscription.Unsubscribe() - var index uint64 - - for { - event, err := subscription.Next(ctx) - switch { - case errors.Is(err, context.Canceled): - return nil - case err != nil: - return err - } - - if event.IsFramingEvent() { - continue - } - - if event.Index <= index { - continue - } - - index = event.Index - e := event.Payload.ToSubscriptionEvent(event.Index) - configEntry := e.GetConfigEntry().GetConfigEntry() - - if configEntry.GetKind() != pbconfigentry.Kind_KindExportedServices { - logger.Error("unexpected config entry kind", "kind", configEntry.GetKind()) - continue - } - partition := acl.PartitionOrDefault(configEntry.GetEnterpriseMeta().GetPartition()) - - eventCh <- controller.Event{ - Obj: exportedServiceItemType{partition: partition}, - } - } -} - -func (s *v1ServiceExportsShim) EventChannel() chan controller.Event { - return s.eventCh -} - -func (s *v1ServiceExportsShim) GetExportedServicesConfigEntry(_ context.Context, name string, entMeta *acl.EnterpriseMeta) (*structs.ExportedServicesConfigEntry, error) { - _, entry, err := s.s.fsm.State().ConfigEntry(nil, structs.ExportedServices, name, entMeta) - if err != nil { - return nil, err - } - - if entry == nil { - return nil, nil - } - - exp, ok := entry.(*structs.ExportedServicesConfigEntry) - if !ok { - return nil, fmt.Errorf("exported services config entry is the wrong type: expected ExportedServicesConfigEntry, actual: %T", entry) - } - - return exp, nil -} - -func (s *v1ServiceExportsShim) WriteExportedServicesConfigEntry(_ context.Context, cfg *structs.ExportedServicesConfigEntry) error { - if err := cfg.Normalize(); err != nil { - return err - } - - if err := cfg.Validate(); err != nil { - return err - } - - req := &structs.ConfigEntryRequest{ - Op: structs.ConfigEntryUpsert, - Entry: cfg, - } - - _, err := s.s.raftApply(structs.ConfigEntryRequestType, req) - return err -} - -func (s 
*v1ServiceExportsShim) DeleteExportedServicesConfigEntry(_ context.Context, name string, entMeta *acl.EnterpriseMeta) error { - if entMeta == nil { - entMeta = acl.DefaultEnterpriseMeta() - } - - req := &structs.ConfigEntryRequest{ - Op: structs.ConfigEntryDelete, - Entry: &structs.ExportedServicesConfigEntry{ - Name: name, - EnterpriseMeta: *entMeta, - }, - } - - if err := req.Entry.Normalize(); err != nil { - return err - } - - _, err := s.s.raftApply(structs.ConfigEntryRequestType, req) - return err -} - -type exportedServiceItemType struct { - partition string -} - -var _ queue.ItemType = (*exportedServiceItemType)(nil) - -func (e exportedServiceItemType) Key() string { - return e.partition -} diff --git a/agent/consul/v2_config_entry_exports_shim_test.go b/agent/consul/v2_config_entry_exports_shim_test.go deleted file mode 100644 index f44eae0681bca..0000000000000 --- a/agent/consul/v2_config_entry_exports_shim_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package consul - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/testrpc" -) - -func TestV1ServiceExportsShim_Integration(t *testing.T) { - t.Parallel() - _, srv := testServerDC(t, "dc1") - - shim := NewExportedServicesShim(srv) - testrpc.WaitForLeader(t, srv.RPC, "dc1") - - v1ServiceExportsShimTests(t, shim, []*structs.ExportedServicesConfigEntry{ - { - Name: "default", - Services: []structs.ExportedService{ - { - Name: "foo", - Consumers: []structs.ServiceConsumer{ - {Peer: "cluster-01"}, - }, - }, - }, - RaftIndex: structs.RaftIndex{ - CreateIndex: 0, - ModifyIndex: 1, - }, - }, - { - Name: "default", - Services: []structs.ExportedService{ - { - Name: "bar", - Consumers: []structs.ServiceConsumer{ - {Peer: "cluster-01"}, - }, - }, - }, - RaftIndex: structs.RaftIndex{ - CreateIndex: 0, - ModifyIndex: 2, - }, - }, - }) -} - -func v1ServiceExportsShimTests(t *testing.T, shim *v1ServiceExportsShim, configs []*structs.ExportedServicesConfigEntry) { - ctx := context.Background() - - go shim.Start(context.Background()) - - partitions := make(map[string]*acl.EnterpriseMeta) - for _, config := range configs { - partitions[config.PartitionOrDefault()] = config.GetEnterpriseMeta() - } - - for _, entMeta := range partitions { - exportedServices, err := shim.GetExportedServicesConfigEntry(ctx, entMeta.PartitionOrDefault(), entMeta) - require.Nil(t, err) - require.Nil(t, exportedServices) - } - - for _, config := range configs { - err := shim.WriteExportedServicesConfigEntry(ctx, config) - require.NoError(t, err) - shim.assertPartitionEvent(t, config.PartitionOrDefault()) - } - - for _, entMeta := range partitions { - err := shim.DeleteExportedServicesConfigEntry(ctx, entMeta.PartitionOrDefault(), entMeta) - require.NoError(t, err) - shim.assertPartitionEvent(t, entMeta.PartitionOrDefault()) - } -} - -func (s *v1ServiceExportsShim) assertPartitionEvent(t *testing.T, partition string) { - t.Helper() - - select { - case event := <-s.eventCh: - require.Equal(t, partition, event.Obj.Key()) - case <-time.After(250 * time.Millisecond): - t.Fatal("timeout waiting for view to receive events") - } -} diff --git a/agent/consul/wanfed/pool.go b/agent/consul/wanfed/pool.go index 3d083019346d4..9320087b3f713 100644 --- a/agent/consul/wanfed/pool.go +++ b/agent/consul/wanfed/pool.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wanfed diff --git a/agent/consul/wanfed/wanfed.go b/agent/consul/wanfed/wanfed.go index 7732e05ad39a4..e82eddcfa88ef 100644 --- a/agent/consul/wanfed/wanfed.go +++ b/agent/consul/wanfed/wanfed.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wanfed diff --git a/agent/consul/wanfed/wanfed_test.go b/agent/consul/wanfed/wanfed_test.go index 8254e9434b367..ef45c197c179a 100644 --- a/agent/consul/wanfed/wanfed_test.go +++ b/agent/consul/wanfed/wanfed_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wanfed diff --git a/agent/consul/watch/server_local.go b/agent/consul/watch/server_local.go index 2bb98fe349df9..5937ba1c6a10e 100644 --- a/agent/consul/watch/server_local.go +++ b/agent/consul/watch/server_local.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package watch diff --git a/agent/consul/watch/server_local_test.go b/agent/consul/watch/server_local_test.go index 84ab8de5739a2..1f96b1ec00d03 100644 --- a/agent/consul/watch/server_local_test.go +++ b/agent/consul/watch/server_local_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package watch diff --git a/agent/consul/xdscapacity/capacity.go b/agent/consul/xdscapacity/capacity.go index 5fb538344d561..57c1f3894244a 100644 --- a/agent/consul/xdscapacity/capacity.go +++ b/agent/consul/xdscapacity/capacity.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package xdscapacity diff --git a/agent/consul/xdscapacity/capacity_test.go b/agent/consul/xdscapacity/capacity_test.go index b3a3935a8806f..d26453feae607 100644 --- a/agent/consul/xdscapacity/capacity_test.go +++ b/agent/consul/xdscapacity/capacity_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package xdscapacity diff --git a/agent/coordinate_endpoint.go b/agent/coordinate_endpoint.go index 60b69244afd5d..744498c055336 100644 --- a/agent/coordinate_endpoint.go +++ b/agent/coordinate_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/coordinate_endpoint_test.go b/agent/coordinate_endpoint_test.go index 508f308d13c7c..fe6deeef9567e 100644 --- a/agent/coordinate_endpoint_test.go +++ b/agent/coordinate_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/debug/host.go b/agent/debug/host.go index f863117e547b0..5116bf7499f7e 100644 --- a/agent/debug/host.go +++ b/agent/debug/host.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package debug diff --git a/agent/debug/host_test.go b/agent/debug/host_test.go index dce469b542f2a..1289e21b4f066 100644 --- a/agent/debug/host_test.go +++ b/agent/debug/host_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package debug diff --git a/agent/delegate_mock_test.go b/agent/delegate_mock_test.go index a75cf2d1e2624..9f91a6a0d919b 100644 --- a/agent/delegate_mock_test.go +++ b/agent/delegate_mock_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/proto-public/pbresource" ) type delegateMock struct { @@ -77,7 +76,3 @@ func (m *delegateMock) Stats() map[string]map[string]string { func (m *delegateMock) ReloadConfig(config consul.ReloadableConfig) error { return m.Called(config).Error(0) } - -func (m *delegateMock) ResourceServiceClient() pbresource.ResourceServiceClient { - return nil -} diff --git a/agent/denylist.go b/agent/denylist.go index 5fdd8cc9f8721..b621465298146 100644 --- a/agent/denylist.go +++ b/agent/denylist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/denylist_test.go b/agent/denylist_test.go index dd77e977d94d7..f9370723a7c30 100644 --- a/agent/denylist_test.go +++ b/agent/denylist_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/discovery/discovery.go b/agent/discovery/discovery.go deleted file mode 100644 index b8c6cb6ab877e..0000000000000 --- a/agent/discovery/discovery.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "fmt" - "net" - - "github.com/hashicorp/consul/agent/config" -) - -var ( - ErrECSNotGlobal = fmt.Errorf("ECS response is not global") - ErrNoData = fmt.Errorf("no data") - ErrNotFound = fmt.Errorf("not found") - ErrNotSupported = fmt.Errorf("not supported") - ErrNoPathToDatacenter = fmt.Errorf("no path to datacenter") -) - -// ECSNotGlobalError may be used to wrap an error or nil, to indicate that the -// EDNS client subnet source scope is not global. -type ECSNotGlobalError struct { - error -} - -func (e ECSNotGlobalError) Error() string { - if e.error == nil { - return "" - } - return e.error.Error() -} - -func (e ECSNotGlobalError) Is(other error) bool { - return other == ErrECSNotGlobal -} - -func (e ECSNotGlobalError) Unwrap() error { - return e.error -} - -// Query is used to request a name-based Service Discovery lookup. -type Query struct { - QueryType QueryType - QueryPayload QueryPayload -} - -// QueryType is used to filter service endpoints. -// This is needed by the V1 catalog because of the -// overlapping lookups through the service endpoint. -type QueryType string - -const ( - QueryTypeConnect QueryType = "CONNECT" // deprecated: use for V1 only - QueryTypeIngress QueryType = "INGRESS" // deprecated: use for V1 only - QueryTypeInvalid QueryType = "INVALID" - QueryTypeNode QueryType = "NODE" - QueryTypePreparedQuery QueryType = "PREPARED_QUERY" // deprecated: use for V1 only - QueryTypeService QueryType = "SERVICE" - QueryTypeVirtual QueryType = "VIRTUAL" - QueryTypeWorkload QueryType = "WORKLOAD" // V2-only -) - -// Context is used to pass information about the request. -type Context struct { - Token string -} - -// QueryTenancy is used to filter catalog data based on tenancy. 
-type QueryTenancy struct { - Namespace string - Partition string - SamenessGroup string - Peer string - Datacenter string -} - -// QueryPayload represents all information needed by the data backend -// to decide which records to include. -type QueryPayload struct { - Name string - PortName string // v1 - this could optionally be "connect" or "ingress"; v2 - this is the service port name - Tag string // deprecated: use for V1 only - SourceIP net.IP // deprecated: used for prepared queries - Tenancy QueryTenancy // tenancy includes any additional labels specified before the domain - Limit int // The maximum number of records to return - - // v2 fields only - EnableFailover bool -} - -// ResultType indicates the Consul resource that a discovery record represents. -// This is useful for things like adding TTLs for different objects in the DNS. -type ResultType string - -const ( - ResultTypeService ResultType = "SERVICE" - ResultTypeNode ResultType = "NODE" - ResultTypeVirtual ResultType = "VIRTUAL" - ResultTypeWorkload ResultType = "WORKLOAD" -) - -// Result is a generic format of targets that could be returned in a query. -// It is the responsibility of the DNS encoder to know what to do with -// each Result, based on the query type. -type Result struct { - Service *Location // The name and address of the service. - Node *Location // The name and address of the node. - Weight uint32 // SRV queries - Metadata map[string]string // Used to collect metadata into TXT Records - Type ResultType // Used to reconstruct the fqdn name of the resource - DNS DNSConfig // Used for DNS-specific configuration for this result - - // Ports include anything the node/service/workload implements. These are filtered if requested by the client. - // They are used in to generate the FQDN and SRV port numbers in V2 Catalog responses. - Ports []Port - - Tenancy ResultTenancy -} - -// TaggedAddress is used to represent a tagged address. -type TaggedAddress struct { - Name string - Address string - Port Port -} - -// Location is used to represent a service, node, or workload. -type Location struct { - Name string - Address string - TaggedAddresses map[string]*TaggedAddress // Used to collect tagged addresses into A/AAAA Records -} - -type DNSConfig struct { - TTL *uint32 // deprecated: use for V1 prepared queries only - Weight uint32 // SRV queries -} - -type Port struct { - Name string - Number uint32 -} - -// ResultTenancy is used to reconstruct the fqdn name of the resource. -type ResultTenancy struct { - Namespace string - Partition string - PeerName string - Datacenter string -} - -// LookupType is used by the CatalogDataFetcher to properly filter endpoints. -type LookupType string - -const ( - LookupTypeService LookupType = "SERVICE" - LookupTypeConnect LookupType = "CONNECT" - LookupTypeIngress LookupType = "INGRESS" -) - -// CatalogDataFetcher is an interface that abstracts data collection -// for Discovery queries. It is assumed that the instantiation also -// includes any agent configuration that influences catalog queries. -// -//go:generate mockery --name CatalogDataFetcher --inpackage -type CatalogDataFetcher interface { - // LoadConfig is used to hot-reload the data fetcher with new agent config. 
- LoadConfig(config *config.RuntimeConfig) - - // FetchNodes fetches A/AAAA/CNAME - FetchNodes(ctx Context, req *QueryPayload) ([]*Result, error) - - // FetchEndpoints fetches records for A/AAAA/CNAME or SRV requests for services - FetchEndpoints(ctx Context, req *QueryPayload, lookupType LookupType) ([]*Result, error) - - // FetchVirtualIP fetches A/AAAA records for virtual IPs - FetchVirtualIP(ctx Context, req *QueryPayload) (*Result, error) - - // FetchRecordsByIp is used for PTR requests - // to look up a service/node from an IP. - FetchRecordsByIp(ctx Context, ip net.IP) ([]*Result, error) - - // FetchWorkload fetches a single Result associated with - // V2 Workload. V2-only. - FetchWorkload(ctx Context, req *QueryPayload) (*Result, error) - - // FetchPreparedQuery evaluates the results of a prepared query. - // deprecated in V2 - FetchPreparedQuery(ctx Context, req *QueryPayload) ([]*Result, error) - - // NormalizeRequest mutates the original request based on data fetcher configuration, like - // defaulting tenancy to the agent's partition. - NormalizeRequest(req *QueryPayload) - - // ValidateRequest throws an error is any of the input fields are invalid for this data fetcher. - ValidateRequest(ctx Context, req *QueryPayload) error -} - -// QueryProcessor is used to process a Discovery Query and return the results. -type QueryProcessor struct { - dataFetcher CatalogDataFetcher -} - -// NewQueryProcessor creates a new QueryProcessor. -func NewQueryProcessor(dataFetcher CatalogDataFetcher) *QueryProcessor { - return &QueryProcessor{ - dataFetcher: dataFetcher, - } -} - -// QueryByName is used to look up a service, node, workload, or prepared query. -func (p *QueryProcessor) QueryByName(query *Query, ctx Context) ([]*Result, error) { - if err := p.dataFetcher.ValidateRequest(ctx, &query.QueryPayload); err != nil { - return nil, err - } - - p.dataFetcher.NormalizeRequest(&query.QueryPayload) - - switch query.QueryType { - case QueryTypeNode: - return p.dataFetcher.FetchNodes(ctx, &query.QueryPayload) - case QueryTypeService: - return p.dataFetcher.FetchEndpoints(ctx, &query.QueryPayload, LookupTypeService) - case QueryTypeConnect: - return p.dataFetcher.FetchEndpoints(ctx, &query.QueryPayload, LookupTypeConnect) - case QueryTypeIngress: - return p.dataFetcher.FetchEndpoints(ctx, &query.QueryPayload, LookupTypeIngress) - case QueryTypeVirtual: - result, err := p.dataFetcher.FetchVirtualIP(ctx, &query.QueryPayload) - if err != nil { - return nil, err - } - return []*Result{result}, nil - case QueryTypeWorkload: - result, err := p.dataFetcher.FetchWorkload(ctx, &query.QueryPayload) - if err != nil { - return nil, err - } - return []*Result{result}, nil - case QueryTypePreparedQuery: - return p.dataFetcher.FetchPreparedQuery(ctx, &query.QueryPayload) - default: - return nil, fmt.Errorf("unknown query type: %s", query.QueryType) - } -} - -// QueryByIP is used to look up a service or node from an IP address. -func (p *QueryProcessor) QueryByIP(ip net.IP, reqCtx Context) ([]*Result, error) { - return p.dataFetcher.FetchRecordsByIp(reqCtx, ip) -} diff --git a/agent/discovery/discovery_test.go b/agent/discovery/discovery_test.go deleted file mode 100644 index a53ec7b866bff..0000000000000 --- a/agent/discovery/discovery_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "errors" - "net" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -var ( - testContext = Context{ - Token: "bar", - } - - testErr = errors.New("test error") - - testIP = net.ParseIP("1.2.3.4") - - testPayload = QueryPayload{ - Name: "foo", - } - - testResult = &Result{ - Node: &Location{Address: "1.2.3.4"}, - Type: ResultTypeNode, // This isn't correct for some test cases, but we are only asserting the right data fetcher functions are called - Service: &Location{Name: "foo"}, - } -) - -func TestQueryByName(t *testing.T) { - - type testCase struct { - name string - reqType QueryType - configureDataFetcher func(*testing.T, *MockCatalogDataFetcher) - expectedResults []*Result - expectedError error - } - - run := func(t *testing.T, tc testCase) { - - fetcher := NewMockCatalogDataFetcher(t) - tc.configureDataFetcher(t, fetcher) - - qp := NewQueryProcessor(fetcher) - - q := Query{ - QueryType: tc.reqType, - QueryPayload: testPayload, - } - - results, err := qp.QueryByName(&q, testContext) - if tc.expectedError != nil { - require.Error(t, err) - require.True(t, errors.Is(err, tc.expectedError)) - require.Nil(t, results) - return - } - require.NoError(t, err) - require.Equal(t, tc.expectedResults, results) - } - - testCases := []testCase{ - { - name: "query node", - reqType: QueryTypeNode, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchNodes", mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query service", - reqType: QueryTypeService, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query connect", - reqType: QueryTypeConnect, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query ingress", - reqType: QueryTypeIngress, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query virtual ip", - reqType: QueryTypeVirtual, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchVirtualIP", mock.Anything, mock.Anything).Return(testResult, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query workload", - reqType: QueryTypeWorkload, - configureDataFetcher: func(t *testing.T, fetcher 
*MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchWorkload", mock.Anything, mock.Anything).Return(testResult, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "query prepared query", - reqType: QueryTypePreparedQuery, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchPreparedQuery", mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "returns error from validation", - reqType: QueryTypePreparedQuery, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(testErr) - }, - expectedError: testErr, - }, - { - name: "returns error from fetcher", - reqType: QueryTypePreparedQuery, - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - fetcher.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - fetcher.On("NormalizeRequest", mock.Anything) - fetcher.On("FetchPreparedQuery", mock.Anything, mock.Anything).Return(nil, testErr) - }, - expectedError: testErr, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func TestQueryByIP(t *testing.T) { - type testCase struct { - name string - configureDataFetcher func(*testing.T, *MockCatalogDataFetcher) - expectedResults []*Result - expectedError error - } - - run := func(t *testing.T, tc testCase) { - - fetcher := NewMockCatalogDataFetcher(t) - tc.configureDataFetcher(t, fetcher) - - qp := NewQueryProcessor(fetcher) - - results, err := qp.QueryByIP(testIP, testContext) - if tc.expectedError != nil { - require.Error(t, err) - require.True(t, errors.Is(err, tc.expectedError)) - require.Nil(t, results) - return - } - require.NoError(t, err) - require.Equal(t, tc.expectedResults, results) - } - - testCases := []testCase{ - { - name: "query by IP", - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - fetcher.On("FetchRecordsByIp", mock.Anything, mock.Anything).Return([]*Result{testResult}, nil) - }, - expectedResults: []*Result{testResult}, - }, - { - name: "returns error from fetcher", - configureDataFetcher: func(t *testing.T, fetcher *MockCatalogDataFetcher) { - fetcher.On("FetchRecordsByIp", mock.Anything, mock.Anything).Return(nil, testErr) - }, - expectedError: testErr, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - run(t, tc) - }) - } -} diff --git a/agent/discovery/mock_CatalogDataFetcher.go b/agent/discovery/mock_CatalogDataFetcher.go deleted file mode 100644 index f80a6010d2d6c..0000000000000 --- a/agent/discovery/mock_CatalogDataFetcher.go +++ /dev/null @@ -1,209 +0,0 @@ -// Code generated by mockery v2.37.1. DO NOT EDIT. 
- -package discovery - -import ( - config "github.com/hashicorp/consul/agent/config" - mock "github.com/stretchr/testify/mock" - - net "net" -) - -// MockCatalogDataFetcher is an autogenerated mock type for the CatalogDataFetcher type -type MockCatalogDataFetcher struct { - mock.Mock -} - -// FetchEndpoints provides a mock function with given fields: ctx, req, lookupType -func (_m *MockCatalogDataFetcher) FetchEndpoints(ctx Context, req *QueryPayload, lookupType LookupType) ([]*Result, error) { - ret := _m.Called(ctx, req, lookupType) - - var r0 []*Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload, LookupType) ([]*Result, error)); ok { - return rf(ctx, req, lookupType) - } - if rf, ok := ret.Get(0).(func(Context, *QueryPayload, LookupType) []*Result); ok { - r0 = rf(ctx, req, lookupType) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*Result) - } - } - - if rf, ok := ret.Get(1).(func(Context, *QueryPayload, LookupType) error); ok { - r1 = rf(ctx, req, lookupType) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchNodes provides a mock function with given fields: ctx, req -func (_m *MockCatalogDataFetcher) FetchNodes(ctx Context, req *QueryPayload) ([]*Result, error) { - ret := _m.Called(ctx, req) - - var r0 []*Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) ([]*Result, error)); ok { - return rf(ctx, req) - } - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) []*Result); ok { - r0 = rf(ctx, req) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*Result) - } - } - - if rf, ok := ret.Get(1).(func(Context, *QueryPayload) error); ok { - r1 = rf(ctx, req) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchPreparedQuery provides a mock function with given fields: ctx, req -func (_m *MockCatalogDataFetcher) FetchPreparedQuery(ctx Context, req *QueryPayload) ([]*Result, error) { - ret := _m.Called(ctx, req) - - var r0 []*Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) ([]*Result, error)); ok { - return rf(ctx, req) - } - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) []*Result); ok { - r0 = rf(ctx, req) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*Result) - } - } - - if rf, ok := ret.Get(1).(func(Context, *QueryPayload) error); ok { - r1 = rf(ctx, req) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchRecordsByIp provides a mock function with given fields: ctx, ip -func (_m *MockCatalogDataFetcher) FetchRecordsByIp(ctx Context, ip net.IP) ([]*Result, error) { - ret := _m.Called(ctx, ip) - - var r0 []*Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, net.IP) ([]*Result, error)); ok { - return rf(ctx, ip) - } - if rf, ok := ret.Get(0).(func(Context, net.IP) []*Result); ok { - r0 = rf(ctx, ip) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*Result) - } - } - - if rf, ok := ret.Get(1).(func(Context, net.IP) error); ok { - r1 = rf(ctx, ip) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchVirtualIP provides a mock function with given fields: ctx, req -func (_m *MockCatalogDataFetcher) FetchVirtualIP(ctx Context, req *QueryPayload) (*Result, error) { - ret := _m.Called(ctx, req) - - var r0 *Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) (*Result, error)); ok { - return rf(ctx, req) - } - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) *Result); ok { - r0 = rf(ctx, req) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*Result) - } - } - - if rf, ok 
:= ret.Get(1).(func(Context, *QueryPayload) error); ok { - r1 = rf(ctx, req) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FetchWorkload provides a mock function with given fields: ctx, req -func (_m *MockCatalogDataFetcher) FetchWorkload(ctx Context, req *QueryPayload) (*Result, error) { - ret := _m.Called(ctx, req) - - var r0 *Result - var r1 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) (*Result, error)); ok { - return rf(ctx, req) - } - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) *Result); ok { - r0 = rf(ctx, req) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*Result) - } - } - - if rf, ok := ret.Get(1).(func(Context, *QueryPayload) error); ok { - r1 = rf(ctx, req) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LoadConfig provides a mock function with given fields: _a0 -func (_m *MockCatalogDataFetcher) LoadConfig(_a0 *config.RuntimeConfig) { - _m.Called(_a0) -} - -// NormalizeRequest provides a mock function with given fields: req -func (_m *MockCatalogDataFetcher) NormalizeRequest(req *QueryPayload) { - _m.Called(req) -} - -// ValidateRequest provides a mock function with given fields: ctx, req -func (_m *MockCatalogDataFetcher) ValidateRequest(ctx Context, req *QueryPayload) error { - ret := _m.Called(ctx, req) - - var r0 error - if rf, ok := ret.Get(0).(func(Context, *QueryPayload) error); ok { - r0 = rf(ctx, req) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewMockCatalogDataFetcher creates a new instance of MockCatalogDataFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockCatalogDataFetcher(t interface { - mock.TestingT - Cleanup(func()) -}) *MockCatalogDataFetcher { - mock := &MockCatalogDataFetcher{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/discovery/query_fetcher_v1.go b/agent/discovery/query_fetcher_v1.go deleted file mode 100644 index fc71ae60e90ed..0000000000000 --- a/agent/discovery/query_fetcher_v1.go +++ /dev/null @@ -1,650 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "context" - "errors" - "fmt" - "net" - "strings" - "sync/atomic" - "time" - - "github.com/armon/go-metrics" - "github.com/armon/go-metrics/prometheus" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/cache" - cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" -) - -const ( - // Increment a counter when requests staler than this are served - staleCounterThreshold = 5 * time.Second -) - -// DNSCounters pre-registers the staleness metric. -// This value is used by both the V1 and V2 DNS (V1 Catalog-only) servers. -var DNSCounters = []prometheus.CounterDefinition{ - { - Name: []string{"dns", "stale_queries"}, - Help: "Increments when an agent serves a query within the allowed stale threshold.", - }, -} - -// v1DataFetcherDynamicConfig is used to store the dynamic configuration of the V1 data fetcher. 
-type v1DataFetcherDynamicConfig struct { - // Default request tenancy - datacenter string - - segmentName string - nodeName string - nodePartition string - - // Catalog configuration - allowStale bool - maxStale time.Duration - useCache bool - cacheMaxAge time.Duration - onlyPassing bool -} - -// V1DataFetcher is used to fetch data from the V1 catalog. -type V1DataFetcher struct { - defaultEnterpriseMeta acl.EnterpriseMeta - dynamicConfig atomic.Value - logger hclog.Logger - - getFromCacheFunc func(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) - rpcFunc func(ctx context.Context, method string, args interface{}, reply interface{}) error - rpcFuncForServiceNodes func(ctx context.Context, req structs.ServiceSpecificRequest) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error) - rpcFuncForSamenessGroup func(ctx context.Context, req *structs.ConfigEntryQuery) (structs.SamenessGroupConfigEntry, cache.ResultMeta, error) - translateServicePortFunc func(dc string, port int, taggedAddresses map[string]structs.ServiceAddress) int -} - -// NewV1DataFetcher creates a new V1 data fetcher. -func NewV1DataFetcher(config *config.RuntimeConfig, - entMeta *acl.EnterpriseMeta, - getFromCacheFunc func(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error), - rpcFunc func(ctx context.Context, method string, args interface{}, reply interface{}) error, - rpcFuncForServiceNodes func(ctx context.Context, req structs.ServiceSpecificRequest) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error), - rpcFuncForSamenessGroup func(ctx context.Context, req *structs.ConfigEntryQuery) (structs.SamenessGroupConfigEntry, cache.ResultMeta, error), - translateServicePortFunc func(dc string, port int, taggedAddresses map[string]structs.ServiceAddress) int, - logger hclog.Logger) *V1DataFetcher { - f := &V1DataFetcher{ - defaultEnterpriseMeta: *entMeta, - getFromCacheFunc: getFromCacheFunc, - rpcFunc: rpcFunc, - rpcFuncForServiceNodes: rpcFuncForServiceNodes, - rpcFuncForSamenessGroup: rpcFuncForSamenessGroup, - translateServicePortFunc: translateServicePortFunc, - logger: logger, - } - f.LoadConfig(config) - return f -} - -// LoadConfig loads the configuration for the V1 data fetcher. -func (f *V1DataFetcher) LoadConfig(config *config.RuntimeConfig) { - dynamicConfig := &v1DataFetcherDynamicConfig{ - allowStale: config.DNSAllowStale, - maxStale: config.DNSMaxStale, - useCache: config.DNSUseCache, - cacheMaxAge: config.DNSCacheMaxAge, - onlyPassing: config.DNSOnlyPassing, - datacenter: config.Datacenter, - segmentName: config.SegmentName, - nodeName: config.NodeName, - } - f.dynamicConfig.Store(dynamicConfig) -} - -// FetchNodes fetches A/AAAA/CNAME -func (f *V1DataFetcher) FetchNodes(ctx Context, req *QueryPayload) ([]*Result, error) { - if req.Tenancy.Namespace != "" && req.Tenancy.Namespace != acl.DefaultNamespaceName { - // Nodes are not namespaced, so this is a name error - return nil, ErrNotFound - } - - cfg := f.dynamicConfig.Load().(*v1DataFetcherDynamicConfig) - // Make an RPC request - args := &structs.NodeSpecificRequest{ - Datacenter: req.Tenancy.Datacenter, - PeerName: req.Tenancy.Peer, - Node: req.Name, - QueryOptions: structs.QueryOptions{ - Token: ctx.Token, - AllowStale: cfg.allowStale, - }, - EnterpriseMeta: queryTenancyToEntMeta(req.Tenancy), - } - out, err := f.fetchNode(cfg, args) - if err != nil { - return nil, fmt.Errorf("failed rpc request: %w", err) - } - - // If we have no out.NodeServices.Nodeaddress, return not found! 
- if out.NodeServices == nil { - return nil, ErrNotFound - } - - results := make([]*Result, 0, 1) - n := out.NodeServices.Node - - results = append(results, &Result{ - Node: &Location{ - Name: n.Node, - Address: n.Address, - TaggedAddresses: makeTaggedAddressesFromStrings(n.TaggedAddresses), - }, - Type: ResultTypeNode, - Metadata: n.Meta, - - Tenancy: ResultTenancy{ - // Namespace is not required because nodes are not namespaced - Partition: n.GetEnterpriseMeta().PartitionOrDefault(), - Datacenter: n.Datacenter, - }, - }) - - return results, nil -} - -// FetchEndpoints fetches records for A/AAAA/CNAME or SRV requests for services -func (f *V1DataFetcher) FetchEndpoints(ctx Context, req *QueryPayload, lookupType LookupType) ([]*Result, error) { - f.logger.Trace(fmt.Sprintf("FetchEndpoints - req: %+v / lookupType: %+v", req, lookupType)) - cfg := f.dynamicConfig.Load().(*v1DataFetcherDynamicConfig) - return f.fetchService(ctx, req, cfg, lookupType) -} - -// FetchVirtualIP fetches A/AAAA records for virtual IPs -func (f *V1DataFetcher) FetchVirtualIP(ctx Context, req *QueryPayload) (*Result, error) { - args := structs.ServiceSpecificRequest{ - // The datacenter of the request is not specified because cross-datacenter virtual IP - // queries are not supported. This guard rail is in place because virtual IPs are allocated - // within a DC, therefore their uniqueness is not guaranteed globally. - PeerName: req.Tenancy.Peer, - ServiceName: req.Name, - EnterpriseMeta: queryTenancyToEntMeta(req.Tenancy), - QueryOptions: structs.QueryOptions{ - Token: ctx.Token, - }, - } - - var out string - if err := f.rpcFunc(context.Background(), "Catalog.VirtualIPForService", &args, &out); err != nil { - return nil, err - } - - result := &Result{ - Service: &Location{ - Name: req.Name, - Address: out, - }, - Type: ResultTypeVirtual, - } - return result, nil -} - -// FetchRecordsByIp is used for PTR requests to look up a service/node from an IP. -// The search is performed in the agent's partition and over all namespaces (or those allowed by the ACL token). -func (f *V1DataFetcher) FetchRecordsByIp(reqCtx Context, ip net.IP) ([]*Result, error) { - if ip == nil { - return nil, ErrNotSupported - } - - configCtx := f.dynamicConfig.Load().(*v1DataFetcherDynamicConfig) - targetIP := ip.String() - - var results []*Result - - args := structs.DCSpecificRequest{ - Datacenter: configCtx.datacenter, - QueryOptions: structs.QueryOptions{ - Token: reqCtx.Token, - AllowStale: configCtx.allowStale, - }, - } - var out structs.IndexedNodes - - // TODO: Replace ListNodes with an internal RPC that can do the filter - // server side to avoid transferring the entire node list. 
- if err := f.rpcFunc(context.Background(), "Catalog.ListNodes", &args, &out); err == nil { - for _, n := range out.Nodes { - if targetIP == n.Address { - results = append(results, &Result{ - Node: &Location{ - Name: n.Node, - Address: n.Address, - TaggedAddresses: makeTaggedAddressesFromStrings(n.TaggedAddresses), - }, - Type: ResultTypeNode, - Tenancy: ResultTenancy{ - Namespace: f.defaultEnterpriseMeta.NamespaceOrDefault(), - Partition: f.defaultEnterpriseMeta.PartitionOrDefault(), - Datacenter: configCtx.datacenter, - }, - }) - return results, nil - } - } - } - - // only look into the services if we didn't find a node - sargs := structs.ServiceSpecificRequest{ - Datacenter: configCtx.datacenter, - QueryOptions: structs.QueryOptions{ - Token: reqCtx.Token, - AllowStale: configCtx.allowStale, - }, - ServiceAddress: targetIP, - EnterpriseMeta: *f.defaultEnterpriseMeta.WithWildcardNamespace(), - } - - var sout structs.IndexedServiceNodes - if err := f.rpcFunc(context.Background(), "Catalog.ServiceNodes", &sargs, &sout); err == nil { - if len(sout.ServiceNodes) == 0 { - return nil, ErrNotFound - } - - for _, n := range sout.ServiceNodes { - if n.ServiceAddress == targetIP { - results = append(results, &Result{ - Service: &Location{ - Name: n.ServiceName, - Address: n.ServiceAddress, - }, - Type: ResultTypeService, - Node: &Location{ - Name: n.Node, - Address: n.Address, - }, - Tenancy: ResultTenancy{ - Namespace: n.NamespaceOrEmpty(), - Partition: n.PartitionOrEmpty(), - Datacenter: n.Datacenter, - }, - }) - return results, nil - } - } - } - - // nothing found locally, recurse - // TODO: (v2-dns) implement recursion (NET-7883) - //d.handleRecurse(resp, req) - - return nil, fmt.Errorf("unhandled error in FetchRecordsByIp") -} - -// FetchWorkload fetches a single Result associated with -// V2 Workload. V2-only. -func (f *V1DataFetcher) FetchWorkload(ctx Context, req *QueryPayload) (*Result, error) { - return nil, ErrNotSupported -} - -// FetchPreparedQuery evaluates the results of a prepared query. -// deprecated in V2 -func (f *V1DataFetcher) FetchPreparedQuery(ctx Context, req *QueryPayload) ([]*Result, error) { - cfg := f.dynamicConfig.Load().(*v1DataFetcherDynamicConfig) - - // Execute the prepared query. - args := structs.PreparedQueryExecuteRequest{ - Datacenter: req.Tenancy.Datacenter, - QueryIDOrName: req.Name, - QueryOptions: structs.QueryOptions{ - Token: ctx.Token, - AllowStale: cfg.allowStale, - MaxAge: cfg.cacheMaxAge, - }, - - // Always pass the local agent through. In the DNS interface, there - // is no provision for passing additional query parameters, so we - // send the local agent's data through to allow distance sorting - // relative to ourself on the server side. - Agent: structs.QuerySource{ - Datacenter: cfg.datacenter, - Segment: cfg.segmentName, - Node: cfg.nodeName, - NodePartition: cfg.nodePartition, - }, - Source: structs.QuerySource{ - Ip: req.SourceIP.String(), - }, - } - - out, err := f.executePreparedQuery(cfg, args) - if err != nil { - // errors.Is() doesn't work with errors.New() so we need to check the error message. - if err.Error() == structs.ErrQueryNotFound.Error() { - err = ErrNotFound - } - return nil, ECSNotGlobalError{err} - } - - // TODO (slackpad) - What's a safe limit we can set here? It seems like - // with dup filtering done at this level we need to get everything to - // match the previous behavior. We can optimize by pushing more filtering - // into the query execution, but for now I think we need to get the full - // response. 
We could also choose a large arbitrary number that will - // likely work in practice, like 10*maxUDPAnswerLimit which should help - // reduce bandwidth if there are thousands of nodes available. - // Determine the TTL. The parse should never fail since we vet it when - // the query is created, but we check anyway. If the query didn't - // specify a TTL then we will try to use the agent's service-specific - // TTL configs. - - // Check is there is a TTL provided as part of the prepared query - var ttlOverride *uint32 - if out.DNS.TTL != "" { - ttl, err := time.ParseDuration(out.DNS.TTL) - if err == nil { - ttlSec := uint32(ttl / time.Second) - ttlOverride = &ttlSec - } - f.logger.Warn("Failed to parse TTL for prepared query , ignoring", - "ttl", out.DNS.TTL, - "prepared_query", req.Name, - ) - } - - // If we have no nodes, return not found! - if len(out.Nodes) == 0 { - return nil, ECSNotGlobalError{ErrNotFound} - } - - // Perform a random shuffle - out.Nodes.Shuffle() - return f.buildResultsFromServiceNodes(out.Nodes, req, ttlOverride), ECSNotGlobalError{} -} - -// executePreparedQuery is used to execute a PreparedQuery against the Consul catalog. -// If the config is set to UseCache, it will use agent cache. -func (f *V1DataFetcher) executePreparedQuery(cfg *v1DataFetcherDynamicConfig, args structs.PreparedQueryExecuteRequest) (*structs.PreparedQueryExecuteResponse, error) { - var out structs.PreparedQueryExecuteResponse - -RPC: - if cfg.useCache { - raw, m, err := f.getFromCacheFunc(context.TODO(), cachetype.PreparedQueryName, &args) - if err != nil { - return nil, err - } - reply, ok := raw.(*structs.PreparedQueryExecuteResponse) - if !ok { - // This should never happen, but we want to protect against panics - return nil, err - } - - f.logger.Trace("cache results for prepared query", - "cache_hit", m.Hit, - "prepared_query", args.QueryIDOrName, - ) - - out = *reply - } else { - if err := f.rpcFunc(context.Background(), "PreparedQuery.Execute", &args, &out); err != nil { - return nil, err - } - } - - // Verify that request is not too stale, redo the request. - if args.AllowStale { - if out.LastContact > cfg.maxStale { - args.AllowStale = false - f.logger.Warn("Query results too stale, re-requesting") - goto RPC - } else if out.LastContact > staleCounterThreshold { - metrics.IncrCounter([]string{"dns", "stale_queries"}, 1) - } - } - - return &out, nil -} - -func (f *V1DataFetcher) ValidateRequest(_ Context, req *QueryPayload) error { - if req.EnableFailover { - return ErrNotSupported - } - if req.PortName != "" { - return ErrNotSupported - } - return validateEnterpriseTenancy(req.Tenancy) -} - -// buildResultsFromServiceNodes builds a list of results from a list of nodes. 
-func (f *V1DataFetcher) buildResultsFromServiceNodes(nodes []structs.CheckServiceNode, req *QueryPayload, ttlOverride *uint32) []*Result { - // Convert the service endpoints to results up to the limit - limit := req.Limit - if len(nodes) < limit || limit == 0 { - limit = len(nodes) - } - - results := make([]*Result, 0, limit) - for idx := 0; idx < limit; idx++ { - n := nodes[idx] - results = append(results, &Result{ - Service: &Location{ - Name: n.Service.Service, - Address: n.Service.Address, - TaggedAddresses: makeTaggedAddressesFromServiceAddresses(n.Service.TaggedAddresses), - }, - Node: &Location{ - Name: n.Node.Node, - Address: n.Node.Address, - TaggedAddresses: makeTaggedAddressesFromStrings(n.Node.TaggedAddresses), - }, - Type: ResultTypeService, - DNS: DNSConfig{ - TTL: ttlOverride, - Weight: uint32(findWeight(n)), - }, - Ports: []Port{ - {Number: uint32(f.translateServicePortFunc(n.Node.Datacenter, n.Service.Port, n.Service.TaggedAddresses))}, - }, - Metadata: n.Node.Meta, - Tenancy: ResultTenancy{ - Namespace: n.Service.NamespaceOrEmpty(), - Partition: n.Service.PartitionOrEmpty(), - Datacenter: n.Node.Datacenter, - PeerName: req.Tenancy.Peer, - }, - }) - } - return results -} - -// makeTaggedAddressesFromServiceAddresses is used to convert a map of service addresses to a map of Locations. -func makeTaggedAddressesFromServiceAddresses(tagged map[string]structs.ServiceAddress) map[string]*TaggedAddress { - taggedAddresses := make(map[string]*TaggedAddress) - for k, v := range tagged { - taggedAddresses[k] = &TaggedAddress{ - Name: k, - Address: v.Address, - Port: Port{ - Number: uint32(v.Port), - }, - } - } - return taggedAddresses -} - -// makeTaggedAddressesFromStrings is used to convert a map of strings to a map of Locations. -func makeTaggedAddressesFromStrings(tagged map[string]string) map[string]*TaggedAddress { - taggedAddresses := make(map[string]*TaggedAddress) - for k, v := range tagged { - taggedAddresses[k] = &TaggedAddress{ - Name: k, - Address: v, - } - } - return taggedAddresses -} - -// fetchNode is used to look up a node in the Consul catalog within NodeServices. -// If the config is set to UseCache, it will get the record from the agent cache. 
-func (f *V1DataFetcher) fetchNode(cfg *v1DataFetcherDynamicConfig, args *structs.NodeSpecificRequest) (*structs.IndexedNodeServices, error) { - var out structs.IndexedNodeServices - - useCache := cfg.useCache -RPC: - if useCache { - raw, _, err := f.getFromCacheFunc(context.TODO(), cachetype.NodeServicesName, args) - if err != nil { - return nil, err - } - reply, ok := raw.(*structs.IndexedNodeServices) - if !ok { - // This should never happen, but we want to protect against panics - return nil, fmt.Errorf("internal error: response type not correct") - } - out = *reply - } else { - if err := f.rpcFunc(context.Background(), "Catalog.NodeServices", &args, &out); err != nil { - return nil, err - } - } - - // Verify that request is not too stale, redo the request - if args.AllowStale { - if out.LastContact > cfg.maxStale { - args.AllowStale = false - useCache = false - f.logger.Warn("Query results too stale, re-requesting") - goto RPC - } else if out.LastContact > staleCounterThreshold { - metrics.IncrCounter([]string{"dns", "stale_queries"}, 1) - } - } - - return &out, nil -} - -func (f *V1DataFetcher) fetchService(ctx Context, req *QueryPayload, - cfg *v1DataFetcherDynamicConfig, lookupType LookupType) ([]*Result, error) { - f.logger.Trace("fetchService", "req", req) - if req.Tenancy.SamenessGroup == "" { - return f.fetchServiceBasedOnTenancy(ctx, req, cfg, lookupType) - } - - return f.fetchServiceFromSamenessGroup(ctx, req, cfg, lookupType) -} - -// fetchServiceBasedOnTenancy is used to look up a service in the Consul catalog based on its tenancy or default tenancy. -func (f *V1DataFetcher) fetchServiceBasedOnTenancy(ctx Context, req *QueryPayload, - cfg *v1DataFetcherDynamicConfig, lookupType LookupType) ([]*Result, error) { - f.logger.Trace(fmt.Sprintf("fetchServiceBasedOnTenancy - req: %+v", req)) - if req.Tenancy.SamenessGroup != "" { - return nil, errors.New("sameness groups are not allowed for service lookups based on tenancy") - } - - datacenter := req.Tenancy.Datacenter - if req.Tenancy.Peer != "" { - datacenter = "" - } - - serviceTags := []string{} - if req.Tag != "" { - serviceTags = []string{req.Tag} - } - args := structs.ServiceSpecificRequest{ - PeerName: req.Tenancy.Peer, - Connect: lookupType == LookupTypeConnect, - Ingress: lookupType == LookupTypeIngress, - Datacenter: datacenter, - ServiceName: req.Name, - ServiceTags: serviceTags, - TagFilter: req.Tag != "", - QueryOptions: structs.QueryOptions{ - Token: ctx.Token, - AllowStale: cfg.allowStale, - MaxAge: cfg.cacheMaxAge, - UseCache: cfg.useCache, - MaxStaleDuration: cfg.maxStale, - }, - EnterpriseMeta: queryTenancyToEntMeta(req.Tenancy), - } - - out, _, err := f.rpcFuncForServiceNodes(context.TODO(), args) - if err != nil { - if strings.Contains(err.Error(), structs.ErrNoDCPath.Error()) { - return nil, ErrNoPathToDatacenter - } - return nil, fmt.Errorf("rpc request failed: %w", err) - } - - // If we have no nodes, return not found! - if len(out.Nodes) == 0 { - return nil, ErrNotFound - } - - // Filter out any service nodes due to health checks - // We copy the slice to avoid modifying the result if it comes from the cache - nodes := make(structs.CheckServiceNodes, len(out.Nodes)) - copy(nodes, out.Nodes) - out.Nodes = nodes.Filter(cfg.onlyPassing) - if err != nil { - return nil, fmt.Errorf("rpc request failed: %w", err) - } - - // If we have no nodes, return not found! 
- if len(out.Nodes) == 0 { - return nil, ErrNotFound - } - - // Perform a random shuffle - out.Nodes.Shuffle() - return f.buildResultsFromServiceNodes(out.Nodes, req, nil), nil -} - -// findWeight returns the weight of a service node. -func findWeight(node structs.CheckServiceNode) int { - // By default, when only_passing is false, warning and passing nodes are returned - // Those values will be used if using a client with support while server has no - // support for weights - weightPassing := 1 - weightWarning := 1 - if node.Service.Weights != nil { - weightPassing = node.Service.Weights.Passing - weightWarning = node.Service.Weights.Warning - } - serviceChecks := make(api.HealthChecks, 0, len(node.Checks)) - for _, c := range node.Checks { - if c.ServiceName == node.Service.Service || c.ServiceName == "" { - healthCheck := &api.HealthCheck{ - Node: c.Node, - CheckID: string(c.CheckID), - Name: c.Name, - Status: c.Status, - Notes: c.Notes, - Output: c.Output, - ServiceID: c.ServiceID, - ServiceName: c.ServiceName, - ServiceTags: c.ServiceTags, - } - serviceChecks = append(serviceChecks, healthCheck) - } - } - status := serviceChecks.AggregatedStatus() - switch status { - case api.HealthWarning: - return weightWarning - case api.HealthPassing: - return weightPassing - case api.HealthMaint: - // Not used in theory - return 0 - case api.HealthCritical: - // Should not happen since already filtered - return 0 - default: - // When non-standard status, return 1 - return 1 - } -} diff --git a/agent/discovery/query_fetcher_v1_ce.go b/agent/discovery/query_fetcher_v1_ce.go deleted file mode 100644 index 0260b7a24aa6a..0000000000000 --- a/agent/discovery/query_fetcher_v1_ce.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package discovery - -import ( - "errors" - "fmt" - - "github.com/hashicorp/consul/acl" -) - -func (f *V1DataFetcher) NormalizeRequest(req *QueryPayload) { - // Nothing to do for CE - return -} - -func validateEnterpriseTenancy(req QueryTenancy) error { - if req.Namespace != "" || req.Partition != acl.DefaultPartitionName { - return ErrNotSupported - } - return nil -} - -func queryTenancyToEntMeta(_ QueryTenancy) acl.EnterpriseMeta { - return acl.EnterpriseMeta{} -} - -// fetchServiceFromSamenessGroup fetches a service from a sameness group. -func (f *V1DataFetcher) fetchServiceFromSamenessGroup(ctx Context, req *QueryPayload, cfg *v1DataFetcherDynamicConfig, lookupType LookupType) ([]*Result, error) { - f.logger.Trace(fmt.Sprintf("fetchServiceFromSamenessGroup - req: %+v", req)) - if req.Tenancy.SamenessGroup == "" { - return nil, errors.New("sameness groups must be provided for service lookups") - } - return f.fetchServiceBasedOnTenancy(ctx, req, cfg, lookupType) -} diff --git a/agent/discovery/query_fetcher_v1_ce_test.go b/agent/discovery/query_fetcher_v1_ce_test.go deleted file mode 100644 index 717475c9dccd9..0000000000000 --- a/agent/discovery/query_fetcher_v1_ce_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package discovery - -const ( - defaultTestNamespace = "" - defaultTestPartition = "" -) diff --git a/agent/discovery/query_fetcher_v1_test.go b/agent/discovery/query_fetcher_v1_test.go deleted file mode 100644 index a587bc74ff8d2..0000000000000 --- a/agent/discovery/query_fetcher_v1_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/cache" - cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/sdk/testutil" -) - -// Test_FetchVirtualIP tests the FetchVirtualIP method in scenarios where the RPC -// call succeeds and fails. -func Test_FetchVirtualIP(t *testing.T) { - // set these to confirm that RPC call does not use them for this particular RPC - rc := &config.RuntimeConfig{ - DNSAllowStale: true, - DNSMaxStale: 100, - DNSUseCache: true, - DNSCacheMaxAge: 100, - } - tests := []struct { - name string - queryPayload *QueryPayload - context Context - expectedResult *Result - expectedErr error - }{ - { - name: "FetchVirtualIP returns result", - queryPayload: &QueryPayload{ - Name: "db", - Tenancy: QueryTenancy{ - Peer: "test-peer", - Namespace: defaultTestNamespace, - Partition: defaultTestPartition, - }, - }, - context: Context{ - Token: "test-token", - }, - expectedResult: &Result{ - Service: &Location{ - Name: "db", - Address: "192.168.10.10", - }, - Type: ResultTypeVirtual, - }, - expectedErr: nil, - }, - { - name: "FetchVirtualIP returns error", - queryPayload: &QueryPayload{ - Name: "db", - Tenancy: QueryTenancy{ - Peer: "test-peer", - Namespace: defaultTestNamespace, - Partition: defaultTestPartition}, - }, - context: Context{ - Token: "test-token", - }, - expectedResult: nil, - expectedErr: errors.New("test-error"), - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - logger := testutil.Logger(t) - mockRPC := cachetype.NewMockRPC(t) - mockRPC.On("RPC", mock.Anything, "Catalog.VirtualIPForService", mock.Anything, mock.Anything). - Return(tc.expectedErr). 
- Run(func(args mock.Arguments) {
- req := args.Get(2).(*structs.ServiceSpecificRequest)
-
- // validate RPC options are not set from config for the VirtualIPForService RPC
- require.False(t, req.AllowStale)
- require.Equal(t, time.Duration(0), req.MaxStaleDuration)
- require.False(t, req.UseCache)
- require.Equal(t, time.Duration(0), req.MaxAge)
-
- // validate RPC options are set correctly from the queryPayload and context
- require.Equal(t, tc.queryPayload.Tenancy.Peer, req.PeerName)
- require.Equal(t, tc.queryPayload.Tenancy.Namespace, req.EnterpriseMeta.NamespaceOrEmpty())
- require.Equal(t, tc.queryPayload.Tenancy.Partition, req.EnterpriseMeta.PartitionOrEmpty())
- require.Equal(t, tc.context.Token, req.QueryOptions.Token)
-
- if tc.expectedErr == nil {
- // set the out parameter to ensure that it is used to formulate the result.Address
- reply := args.Get(3).(*string)
- *reply = tc.expectedResult.Service.Address
- }
- })
- translateServicePortFunc := func(dc string, port int, taggedAddresses map[string]structs.ServiceAddress) int { return 0 }
- rpcFuncForServiceNodes := func(ctx context.Context, req structs.ServiceSpecificRequest) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error) {
- return structs.IndexedCheckServiceNodes{}, cache.ResultMeta{}, nil
- }
- rpcFuncForSamenessGroup := func(ctx context.Context, req *structs.ConfigEntryQuery) (structs.SamenessGroupConfigEntry, cache.ResultMeta, error) {
- return structs.SamenessGroupConfigEntry{}, cache.ResultMeta{}, nil
- }
- getFromCacheFunc := func(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) {
- return nil, cache.ResultMeta{}, nil
- }
-
- df := NewV1DataFetcher(rc, acl.DefaultEnterpriseMeta(), getFromCacheFunc, mockRPC.RPC, rpcFuncForServiceNodes, rpcFuncForSamenessGroup, translateServicePortFunc, logger)
-
- result, err := df.FetchVirtualIP(tc.context, tc.queryPayload)
- require.Equal(t, tc.expectedErr, err)
- require.Equal(t, tc.expectedResult, result)
- })
- }
-}
-
-// Test_FetchEndpoints tests the FetchEndpoints method in scenarios where the RPC
-// call succeeds and fails.
-func Test_FetchEndpoints(t *testing.T) { - // set these to confirm that RPC call does not use them for this particular RPC - rc := &config.RuntimeConfig{ - DNSAllowStale: true, - DNSMaxStale: 100, - DNSUseCache: true, - DNSCacheMaxAge: 100, - } - ctx := Context{ - Token: "test-token", - } - expectedResults := []*Result{ - { - Node: &Location{ - Name: "node-name", - Address: "node-address", - TaggedAddresses: map[string]*TaggedAddress{}, - }, - Service: &Location{ - Name: "service-name", - Address: "service-address", - TaggedAddresses: map[string]*TaggedAddress{}, - }, - Type: ResultTypeService, - DNS: DNSConfig{ - Weight: 1, - }, - Ports: []Port{ - { - Number: 0, - }, - }, - Tenancy: ResultTenancy{ - PeerName: "test-peer", - }, - }, - } - - logger := testutil.Logger(t) - mockRPC := cachetype.NewMockRPC(t) - translateServicePortFunc := func(dc string, port int, taggedAddresses map[string]structs.ServiceAddress) int { return 0 } - rpcFuncForSamenessGroup := func(ctx context.Context, req *structs.ConfigEntryQuery) (structs.SamenessGroupConfigEntry, cache.ResultMeta, error) { - return structs.SamenessGroupConfigEntry{}, cache.ResultMeta{}, nil - } - getFromCacheFunc := func(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) { - return nil, cache.ResultMeta{}, nil - } - rpcFuncForServiceNodes := func(ctx context.Context, req structs.ServiceSpecificRequest) (structs.IndexedCheckServiceNodes, cache.ResultMeta, error) { - return structs.IndexedCheckServiceNodes{ - Nodes: []structs.CheckServiceNode{ - { - Node: &structs.Node{ - Address: "node-address", - Node: "node-name", - }, - Service: &structs.NodeService{ - Address: "service-address", - Service: "service-name", - }, - }, - }, - }, cache.ResultMeta{}, nil - } - queryPayload := &QueryPayload{ - Name: "service-name", - Tenancy: QueryTenancy{ - Peer: "test-peer", - Namespace: defaultTestNamespace, - Partition: defaultTestPartition, - }, - } - - df := NewV1DataFetcher(rc, acl.DefaultEnterpriseMeta(), getFromCacheFunc, mockRPC.RPC, rpcFuncForServiceNodes, rpcFuncForSamenessGroup, translateServicePortFunc, logger) - - results, err := df.FetchEndpoints(ctx, queryPayload, LookupTypeService) - require.NoError(t, err) - require.Equal(t, expectedResults, results) -} diff --git a/agent/discovery/query_fetcher_v2.go b/agent/discovery/query_fetcher_v2.go deleted file mode 100644 index 02e8fcacccb18..0000000000000 --- a/agent/discovery/query_fetcher_v2.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "context" - "fmt" - "math/rand" - "net" - "strings" - "sync/atomic" - - "golang.org/x/exp/slices" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// v2DataFetcherDynamicConfig is used to store the dynamic configuration of the V2 data fetcher. -type v2DataFetcherDynamicConfig struct { - onlyPassing bool -} - -// V2DataFetcher is used to fetch data from the V2 catalog. -type V2DataFetcher struct { - client pbresource.ResourceServiceClient - logger hclog.Logger - - // Requests inherit the partition of the agent unless otherwise specified. 
- defaultPartition string - - dynamicConfig atomic.Value -} - -// NewV2DataFetcher creates a new V2 data fetcher. -func NewV2DataFetcher(config *config.RuntimeConfig, client pbresource.ResourceServiceClient, logger hclog.Logger) *V2DataFetcher { - f := &V2DataFetcher{ - client: client, - logger: logger, - defaultPartition: config.PartitionOrDefault(), - } - f.LoadConfig(config) - return f -} - -// LoadConfig loads the configuration for the V2 data fetcher. -func (f *V2DataFetcher) LoadConfig(config *config.RuntimeConfig) { - dynamicConfig := &v2DataFetcherDynamicConfig{ - onlyPassing: config.DNSOnlyPassing, - } - f.dynamicConfig.Store(dynamicConfig) -} - -// FetchNodes fetches A/AAAA/CNAME -func (f *V2DataFetcher) FetchNodes(ctx Context, req *QueryPayload) ([]*Result, error) { - // TODO (v2-dns): NET-6623 - Implement FetchNodes - // Make sure that we validate that namespace is not provided here - return nil, fmt.Errorf("not implemented") -} - -// FetchEndpoints fetches records for A/AAAA/CNAME or SRV requests for services -func (f *V2DataFetcher) FetchEndpoints(reqContext Context, req *QueryPayload, lookupType LookupType) ([]*Result, error) { - if lookupType != LookupTypeService { - return nil, ErrNotSupported - } - - configCtx := f.dynamicConfig.Load().(*v2DataFetcherDynamicConfig) - - serviceEndpoints := pbcatalog.ServiceEndpoints{} - serviceEndpointsResource, err := f.fetchResource(reqContext, *req, pbcatalog.ServiceEndpointsType, &serviceEndpoints) - if err != nil { - return nil, err - } - - f.logger.Trace("shuffling endpoints", "name", req.Name, "endpoints", len(serviceEndpoints.Endpoints)) - - // Shuffle the endpoints slice - shuffleFunc := func(i, j int) { - serviceEndpoints.Endpoints[i], serviceEndpoints.Endpoints[j] = serviceEndpoints.Endpoints[j], serviceEndpoints.Endpoints[i] - } - rand.Shuffle(len(serviceEndpoints.Endpoints), shuffleFunc) - - // Convert the service endpoints to results up to the limit - limit := req.Limit - if len(serviceEndpoints.Endpoints) < limit || limit == 0 { - limit = len(serviceEndpoints.Endpoints) - } - - results := make([]*Result, 0, limit) - for _, endpoint := range serviceEndpoints.Endpoints[:limit] { - - // First we check the endpoint first to make sure that the requested port is matched from the service. - // We error here because we expect all endpoints to have the same ports as the service. 
- ports := getResultPorts(req, endpoint.Ports) //assuming the logic changed in getResultPorts
- if len(ports) == 0 {
- f.logger.Debug("could not find matching port in endpoint", "name", req.Name, "port", req.PortName)
- return nil, ErrNotFound
- }
-
- address, err := f.addressFromWorkloadAddresses(endpoint.Addresses, req.Name)
- if err != nil {
- return nil, err
- }
-
- weight, ok := getEndpointWeight(endpoint, configCtx)
- if !ok {
- f.logger.Debug("endpoint filtered out because of health status", "name", req.Name, "endpoint", endpoint.GetTargetRef().GetName())
- continue
- }
-
- result := &Result{
- Node: &Location{
- Address: address,
- Name: endpoint.GetTargetRef().GetName(),
- },
- Type: ResultTypeWorkload,
- Tenancy: ResultTenancy{
- Namespace: serviceEndpointsResource.GetId().GetTenancy().GetNamespace(),
- Partition: serviceEndpointsResource.GetId().GetTenancy().GetPartition(),
- },
- DNS: DNSConfig{
- Weight: weight,
- },
- Ports: ports,
- }
- results = append(results, result)
- }
- return results, nil
-}
-
-// FetchVirtualIP fetches A/AAAA records for virtual IPs
-func (f *V2DataFetcher) FetchVirtualIP(ctx Context, req *QueryPayload) (*Result, error) {
- // TODO (v2-dns): NET-6624 - Implement FetchVirtualIP
- return nil, fmt.Errorf("not implemented")
-}
-
-// FetchRecordsByIp is used for PTR requests to look up a service/node from an IP.
-func (f *V2DataFetcher) FetchRecordsByIp(ctx Context, ip net.IP) ([]*Result, error) {
- // TODO (v2-dns): NET-6795 - Implement FetchRecordsByIp
- // Validate non-nil IP
- return nil, fmt.Errorf("not implemented")
-}
-
-// FetchWorkload is used to fetch a single workload from the V2 catalog.
-// V2-only.
-func (f *V2DataFetcher) FetchWorkload(reqContext Context, req *QueryPayload) (*Result, error) {
- workload := pbcatalog.Workload{}
- resourceObj, err := f.fetchResource(reqContext, *req, pbcatalog.WorkloadType, &workload)
- if err != nil {
- return nil, err
- }
-
- // Check that the workload exposes the requested port.
- // If it does not, the lookup is treated as not found.
- ports := getResultPorts(req, workload.Ports) //assuming the logic changed in getResultPorts
- if ports == nil || len(ports) == 0 {
- f.logger.Debug("could not find matching port in endpoint", "name", req.Name, "port", req.PortName)
- return nil, ErrNotFound
- }
-
- address, err := f.addressFromWorkloadAddresses(workload.Addresses, req.Name)
- if err != nil {
- return nil, err
- }
-
- tenancy := resourceObj.GetId().GetTenancy()
- result := &Result{
- Node: &Location{
- Address: address,
- Name: resourceObj.GetId().GetName(),
- },
- Type: ResultTypeWorkload,
- Tenancy: ResultTenancy{
- Namespace: tenancy.GetNamespace(),
- Partition: tenancy.GetPartition(),
- },
- Ports: ports,
- }
-
- return result, nil
-}
-
-// FetchPreparedQuery is used to fetch a prepared query from the V2 catalog.
-// Deprecated in V2.
-func (f *V2DataFetcher) FetchPreparedQuery(ctx Context, req *QueryPayload) ([]*Result, error) {
- return nil, ErrNotSupported
-}
-
-func (f *V2DataFetcher) NormalizeRequest(req *QueryPayload) {
- // If we do not have an explicit partition in the request, we use the agent's
- if req.Tenancy.Partition == "" {
- req.Tenancy.Partition = f.defaultPartition
- }
-}
-
-// ValidateRequest throws an error if any of the deprecated V1 input fields are used in a QueryByName for this data fetcher.
-func (f *V2DataFetcher) ValidateRequest(_ Context, req *QueryPayload) error { - if req.Tag != "" { - return ErrNotSupported - } - if req.SourceIP != nil { - return ErrNotSupported - } - return nil -} - -// fetchResource is used to read a single resource from the V2 catalog and cast into a concrete type. -func (f *V2DataFetcher) fetchResource(reqContext Context, req QueryPayload, kind *pbresource.Type, payload proto.Message) (*pbresource.Resource, error) { - // Query the resource service for the ServiceEndpoints by name and tenancy - resourceReq := pbresource.ReadRequest{ - Id: &pbresource.ID{ - Name: req.Name, - Type: kind, - Tenancy: queryTenancyToResourceTenancy(req.Tenancy), - }, - } - - f.logger.Trace("fetching "+kind.String(), "name", req.Name) - resourceCtx := metadata.AppendToOutgoingContext(context.Background(), "x-consul-token", reqContext.Token) - - // If the service is not found, return nil and an error equivalent to NXDOMAIN - response, err := f.client.Read(resourceCtx, &resourceReq) - switch { - case grpcNotFoundErr(err): - f.logger.Debug(kind.String()+" not found", "name", req.Name) - return nil, ErrNotFound - case err != nil: - f.logger.Error("error fetching "+kind.String(), "name", req.Name) - return nil, fmt.Errorf("error fetching %s: %w", kind.String(), err) - // default: fallthrough - } - - data := response.GetResource().GetData() - if err := data.UnmarshalTo(payload); err != nil { - f.logger.Error("error unmarshalling "+kind.String(), "name", req.Name) - return nil, fmt.Errorf("error unmarshalling %s: %w", kind.String(), err) - } - return response.GetResource(), nil -} - -// addressFromWorkloadAddresses returns one address from the workload addresses. -func (f *V2DataFetcher) addressFromWorkloadAddresses(addresses []*pbcatalog.WorkloadAddress, name string) (string, error) { - // TODO: (v2-dns): we will need to intelligently return the right workload address based on either the translate - // address setting or the locality of the requester. Workloads must have at least one. - // We also need to make sure that we filter out unix sockets here. - address := addresses[0].GetHost() - if strings.HasPrefix(address, "unix://") { - f.logger.Error("unix sockets are currently unsupported in workload results", "name", name) - return "", ErrNotFound - } - return address, nil -} - -// getEndpointWeight returns the weight of the endpoint and a boolean indicating if the endpoint should be included -// based on it's health status. -func getEndpointWeight(endpoint *pbcatalog.Endpoint, configCtx *v2DataFetcherDynamicConfig) (uint32, bool) { - health := endpoint.GetHealthStatus().Enum() - if health == nil { - return 0, false - } - - // Filter based on health status and agent config - // This is also a good opportunity to see if SRV weights are set - var weight uint32 - switch *health { - case pbcatalog.Health_HEALTH_PASSING: - weight = endpoint.GetDns().GetWeights().GetPassing() - case pbcatalog.Health_HEALTH_CRITICAL: - return 0, false // always filtered out - case pbcatalog.Health_HEALTH_WARNING: - if configCtx.onlyPassing { - return 0, false // filtered out - } - weight = endpoint.GetDns().GetWeights().GetWarning() - default: - // Everything else can be filtered out - return 0, false - } - - // Important! double-check the weight in the case DNS weights are not set - if weight == 0 { - weight = 1 - } - return weight, true -} - -// getResultPorts conditionally returns ports from a map based on a query. The results are sorted by name. 
-func getResultPorts(req *QueryPayload, workloadPorts map[string]*pbcatalog.WorkloadPort) []Port { - if len(workloadPorts) == 0 { - return nil - } - - var ports []Port - if req.PortName != "" { - // Make sure the workload implements that port name. - if _, ok := workloadPorts[req.PortName]; !ok { - return nil - } - // In the case that the query asked for a specific port, we only return that port. - ports = []Port{ - { - Name: req.PortName, - Number: workloadPorts[req.PortName].Port, - }, - } - } else { - // If the client didn't specify a particular port, return all the workload ports. - for name, port := range workloadPorts { - ports = append(ports, Port{ - Name: name, - Number: port.Port, - }) - } - // Stable Sort - slices.SortStableFunc(ports, func(i, j Port) int { - if i.Name < j.Name { - return -1 - } else if i.Name > j.Name { - return 1 - } - return 0 - }) - } - return ports -} - -// queryTenancyToResourceTenancy converts a QueryTenancy to a pbresource.Tenancy. -func queryTenancyToResourceTenancy(qTenancy QueryTenancy) *pbresource.Tenancy { - rTenancy := resource.DefaultNamespacedTenancy() - - // If the request has any tenancy specified, it overrides the defaults. - if qTenancy.Namespace != "" { - rTenancy.Namespace = qTenancy.Namespace - } - // In the case of partition, we have the agent's partition as the fallback. - if qTenancy.Partition != "" { - rTenancy.Partition = qTenancy.Partition - } - - return rTenancy -} - -// grpcNotFoundErr returns true if the error is a gRPC status error with a code of NotFound. -func grpcNotFoundErr(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - return ok && s.Code() == codes.NotFound -} diff --git a/agent/discovery/query_fetcher_v2_test.go b/agent/discovery/query_fetcher_v2_test.go deleted file mode 100644 index 5519ad13c7943..0000000000000 --- a/agent/discovery/query_fetcher_v2_test.go +++ /dev/null @@ -1,859 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package discovery - -import ( - "errors" - "fmt" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/agent/config" - mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -var ( - unknownErr = errors.New("I don't feel so good") -) - -// Test_FetchService tests the FetchService method in scenarios where the RPC -// call succeeds and fails. -func Test_FetchWorkload(t *testing.T) { - - rc := &config.RuntimeConfig{ - DNSOnlyPassing: false, - } - - tests := []struct { - name string - queryPayload *QueryPayload - context Context - configureMockClient func(mockClient *mockpbresource.ResourceServiceClient_Expecter) - expectedResult *Result - expectedErr error - }{ - { - name: "FetchWorkload returns result", - queryPayload: &QueryPayload{ - Name: "foo-1234", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - result := getTestWorkloadResponse(t, "foo-1234", "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: &Result{ - Node: &Location{Name: "foo-1234", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - expectedErr: nil, - }, - { - name: "FetchWorkload for non-existent workload", - queryPayload: &QueryPayload{ - Name: "foo-1234", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - input := getTestWorkloadResponse(t, "foo-1234", "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, status.Error(codes.NotFound, "not found")). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, input.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: nil, - expectedErr: ErrNotFound, - }, - { - name: "FetchWorkload encounters a resource client error", - queryPayload: &QueryPayload{ - Name: "foo-1234", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - input := getTestWorkloadResponse(t, "foo-1234", "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, unknownErr). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, input.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: nil, - expectedErr: unknownErr, - }, - { - name: "FetchWorkload with a matching port", - queryPayload: &QueryPayload{ - Name: "foo-1234", - PortName: "api", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - result := getTestWorkloadResponse(t, "foo-1234", "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: &Result{ - Node: &Location{Name: "foo-1234", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - }, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - expectedErr: nil, - }, - { - name: "FetchWorkload with a matching port", - queryPayload: &QueryPayload{ - Name: "foo-1234", - PortName: "not-api", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - result := getTestWorkloadResponse(t, "foo-1234", "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: nil, - expectedErr: ErrNotFound, - }, - { - name: "FetchWorkload returns result for non-default tenancy", - queryPayload: &QueryPayload{ - Name: "foo-1234", - Tenancy: QueryTenancy{ - Namespace: "test-namespace", - Partition: "test-partition", - }, - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - result := getTestWorkloadResponse(t, "foo-1234", "test-namespace", "test-partition") - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - require.Equal(t, result.GetResource().GetId().GetTenancy().GetNamespace(), req.Id.Tenancy.Namespace) - require.Equal(t, result.GetResource().GetId().GetTenancy().GetPartition(), req.Id.Tenancy.Partition) - }) - }, - expectedResult: &Result{ - Node: &Location{Name: "foo-1234", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - Tenancy: ResultTenancy{ - Namespace: "test-namespace", - Partition: "test-partition", - }, - }, - expectedErr: nil, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - logger := testutil.Logger(t) - - client := mockpbresource.NewResourceServiceClient(t) - mockClient := client.EXPECT() - tc.configureMockClient(mockClient) - - df := NewV2DataFetcher(rc, client, logger) - - result, err := df.FetchWorkload(tc.context, tc.queryPayload) - require.True(t, errors.Is(err, tc.expectedErr)) - require.Equal(t, tc.expectedResult, result) - }) - } -} - -// Test_V2FetchEndpoints the FetchService method in scenarios where the RPC -// call succeeds and fails. -func Test_V2FetchEndpoints(t *testing.T) { - - tests := []struct { - name string - queryPayload *QueryPayload - context Context - configureMockClient func(mockClient *mockpbresource.ResourceServiceClient_Expecter) - rc *config.RuntimeConfig - expectedResult []*Result - expectedErr error - verifyShuffle bool - }{ - { - name: "FetchEndpoints returns result", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - endpoints := []*pbcatalog.Endpoint{ - makeEndpoint("consul-1", "1.2.3.4", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - serviceEndpoints := getTestEndpointsResponse(t, "", "", endpoints...) - mockClient.Read(mock.Anything, mock.Anything). - Return(serviceEndpoints, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, serviceEndpoints.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 1, - }, - }, - }, - }, - { - name: "FetchEndpoints returns empty result with no endpoints", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - - result := getTestEndpointsResponse(t, "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: []*Result{}, - }, - { - name: "FetchEndpoints returns a name error when the ServiceEndpoint does not exist", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - - result := getTestEndpointsResponse(t, "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, status.Error(codes.NotFound, "not found")). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedErr: ErrNotFound, - }, - { - name: "FetchEndpoints encounters a resource client error", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - - result := getTestEndpointsResponse(t, "", "") - mockClient.Read(mock.Anything, mock.Anything). - Return(nil, unknownErr). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedErr: unknownErr, - }, - { - name: "FetchEndpoints always filters out critical endpoints; DNS weights applied correctly", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - results := []*pbcatalog.Endpoint{ - makeEndpoint("consul-1", "1.2.3.4", pbcatalog.Health_HEALTH_PASSING, 2, 3), - makeEndpoint("consul-2", "2.3.4.5", pbcatalog.Health_HEALTH_WARNING, 2, 3), - makeEndpoint("consul-3", "3.4.5.6", pbcatalog.Health_HEALTH_CRITICAL, 2, 3), - } - - result := getTestEndpointsResponse(t, "", "", results...) - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 2, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - }, - { - Node: &Location{Name: "consul-2", Address: "2.3.4.5"}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 3, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - }, - }, - }, - { - name: "FetchEndpoints filters out warning endpoints when DNSOnlyPassing is true", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - results := []*pbcatalog.Endpoint{ - makeEndpoint("consul-1", "1.2.3.4", pbcatalog.Health_HEALTH_PASSING, 2, 3), - makeEndpoint("consul-2", "2.3.4.5", pbcatalog.Health_HEALTH_WARNING, 2, 3), - makeEndpoint("consul-3", "3.4.5.6", pbcatalog.Health_HEALTH_CRITICAL, 2, 3), - } - - result := getTestEndpointsResponse(t, "", "", results...) - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - rc: &config.RuntimeConfig{ - DNSOnlyPassing: true, - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "1.2.3.4"}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 2, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - }, - }, - }, - { - name: "FetchEndpoints shuffles the results", - queryPayload: &QueryPayload{ - Name: "consul", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - results := []*pbcatalog.Endpoint{ - // use a set of 10 elements, the odds of getting the same result are 1 in 3628800 - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-2", "10.0.0.2", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-3", "10.0.0.3", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-4", "10.0.0.4", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-5", "10.0.0.5", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-6", "10.0.0.6", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-7", "10.0.0.7", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-8", "10.0.0.8", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-9", "10.0.0.9", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-10", "10.0.0.10", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - result := getTestEndpointsResponse(t, "", "", results...) - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: func() []*Result { - results := make([]*Result, 0, 10) - - for i := 0; i < 10; i++ { - name := fmt.Sprintf("consul-%d", i+1) - address := fmt.Sprintf("10.0.0.%d", i+1) - result := &Result{ - Node: &Location{Name: name, Address: address}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - DNS: DNSConfig{ - Weight: 1, - }, - } - results = append(results, result) - } - return results - }(), - verifyShuffle: true, - }, - { - name: "FetchEndpoints returns only the specified limit", - queryPayload: &QueryPayload{ - Name: "consul", - Limit: 1, - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - results := []*pbcatalog.Endpoint{ - // intentionally all the same to make this easier to verify - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - result := getTestEndpointsResponse(t, "", "", results...) - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "10.0.0.1"}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 1, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - }, - }, - }, - { - name: "FetchEndpoints returns results with non-default tenancy", - queryPayload: &QueryPayload{ - Name: "consul", - Tenancy: QueryTenancy{ - Namespace: "test-namespace", - Partition: "test-partition", - }, - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - results := []*pbcatalog.Endpoint{ - // intentionally all the same to make this easier to verify - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - result := getTestEndpointsResponse(t, "test-namespace", "test-partition", results...) - mockClient.Read(mock.Anything, mock.Anything). - Return(result, nil). - Once(). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, result.GetResource().GetId().GetName(), req.Id.Name) - require.Equal(t, result.GetResource().GetId().GetTenancy().GetNamespace(), req.Id.Tenancy.Namespace) - require.Equal(t, result.GetResource().GetId().GetTenancy().GetPartition(), req.Id.Tenancy.Partition) - }) - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "10.0.0.1"}, - Type: ResultTypeWorkload, - Tenancy: ResultTenancy{ - Namespace: "test-namespace", - Partition: "test-partition", - }, - DNS: DNSConfig{ - Weight: 1, - }, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - }, - }, - }, - { - name: "FetchEndpoints returns only a specific port if is one requested", - queryPayload: &QueryPayload{ - Name: "consul", - PortName: "api", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - endpoints := []*pbcatalog.Endpoint{ - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - serviceEndpoints := getTestEndpointsResponse(t, "", "", endpoints...) - mockClient.Read(mock.Anything, mock.Anything). - Return(serviceEndpoints, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, serviceEndpoints.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedResult: []*Result{ - { - Node: &Location{Name: "consul-1", Address: "10.0.0.1"}, - Type: ResultTypeWorkload, - Ports: []Port{ - { - Name: "api", - Number: 5678, - }, - // No mesh port this time - }, - Tenancy: ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - DNS: DNSConfig{ - Weight: 1, - }, - }, - }, - }, - { - name: "FetchEndpoints returns a name error when a service doesn't implement the requested port", - queryPayload: &QueryPayload{ - Name: "consul", - PortName: "banana", - }, - context: Context{ - Token: "test-token", - }, - configureMockClient: func(mockClient *mockpbresource.ResourceServiceClient_Expecter) { - endpoints := []*pbcatalog.Endpoint{ - makeEndpoint("consul-1", "10.0.0.1", pbcatalog.Health_HEALTH_PASSING, 0, 0), - } - - serviceEndpoints := getTestEndpointsResponse(t, "", "", endpoints...) - mockClient.Read(mock.Anything, mock.Anything). - Return(serviceEndpoints, nil). - Once(). - Run(func(args mock.Arguments) { - req := args.Get(1).(*pbresource.ReadRequest) - require.Equal(t, serviceEndpoints.GetResource().GetId().GetName(), req.Id.Name) - }) - }, - expectedErr: ErrNotFound, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - logger := testutil.Logger(t) - - client := mockpbresource.NewResourceServiceClient(t) - mockClient := client.EXPECT() - tc.configureMockClient(mockClient) - - if tc.rc == nil { - tc.rc = &config.RuntimeConfig{ - DNSOnlyPassing: false, - } - } - - df := NewV2DataFetcher(tc.rc, client, logger) - - result, err := df.FetchEndpoints(tc.context, tc.queryPayload, LookupTypeService) - require.True(t, errors.Is(err, tc.expectedErr)) - - if tc.verifyShuffle { - require.NotEqualf(t, tc.expectedResult, result, "expected result to be shuffled. There is a small probability that it shuffled back to the original order. 
In that case, you may want to play the lottery.") - } - - require.ElementsMatchf(t, tc.expectedResult, result, "elements of results should match") - }) - } -} - -func getTestWorkloadResponse(t *testing.T, name string, nsOverride string, partitionOverride string) *pbresource.ReadResponse { - workload := &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "1.2.3.4", - Ports: []string{"api", "mesh"}, - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "api": { - Port: 5678, - }, - "mesh": { - Port: 21000, - }, - }, - Identity: "test-identity", - } - - data, err := anypb.New(workload) - require.NoError(t, err) - - resp := &pbresource.ReadResponse{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: name, - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - }, - Data: data, - }, - } - - if nsOverride != "" { - resp.Resource.Id.Tenancy.Namespace = nsOverride - } - if partitionOverride != "" { - resp.Resource.Id.Tenancy.Partition = partitionOverride - } - - return resp -} - -func makeEndpoint(name string, address string, health pbcatalog.Health, weightPassing, weightWarning uint32) *pbcatalog.Endpoint { - endpoint := &pbcatalog.Endpoint{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: address, - Ports: []string{"api"}, - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "api": { - Port: 5678, - }, - "mesh": { - Port: 21000, - }, - }, - HealthStatus: health, - TargetRef: &pbresource.ID{ - Name: name, - }, - } - - if weightPassing > 0 || weightWarning > 0 { - endpoint.Dns = &pbcatalog.DNSPolicy{ - Weights: &pbcatalog.Weights{ - Passing: weightPassing, - Warning: weightWarning, - }, - } - } - - return endpoint -} - -func getTestEndpointsResponse(t *testing.T, nsOverride string, partitionOverride string, endpoints ...*pbcatalog.Endpoint) *pbresource.ReadResponse { - serviceEndpoints := &pbcatalog.ServiceEndpoints{ - Endpoints: endpoints, - } - - data, err := anypb.New(serviceEndpoints) - require.NoError(t, err) - - resp := &pbresource.ReadResponse{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: "consul", - Type: pbcatalog.ServiceType, - Tenancy: resource.DefaultNamespacedTenancy(), - }, - Data: data, - }, - } - - if nsOverride != "" { - resp.Resource.Id.Tenancy.Namespace = nsOverride - } - if partitionOverride != "" { - resp.Resource.Id.Tenancy.Partition = partitionOverride - } - - return resp -} diff --git a/agent/discovery_chain_endpoint.go b/agent/discovery_chain_endpoint.go index 69a5e668f46e1..a3aaa421f9387 100644 --- a/agent/discovery_chain_endpoint.go +++ b/agent/discovery_chain_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 7e1e9a5524e4f..ed42ca0aede8a 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/dns.go b/agent/dns.go index ebcafc1d61a42..5804dc97dd8ef 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -16,22 +16,41 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/armon/go-metrics/prometheus" "github.com/armon/go-radix" + "github.com/coredns/coredns/plugin/pkg/dnsutil" "github.com/hashicorp/go-hclog" "github.com/miekg/dns" "github.com/hashicorp/consul/acl" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/config" + agentdns "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - dnsutil "github.com/hashicorp/consul/internal/dnsutil" - libdns "github.com/hashicorp/consul/internal/dnsutil" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" ) +var DNSCounters = []prometheus.CounterDefinition{ + { + Name: []string{"dns", "stale_queries"}, + Help: "Increments when an agent serves a query within the allowed stale threshold.", + }, +} + +var DNSSummaries = []prometheus.SummaryDefinition{ + { + Name: []string{"dns", "ptr_query"}, + Help: "Measures the time spent handling a reverse DNS query for the given node.", + }, + { + Name: []string{"dns", "domain_query"}, + Help: "Measures the time spent handling a domain query for the given node.", + }, +} + const ( // UDP can fit ~25 A records in a 512B response, and ~14 AAAA // records. Limit further to prevent unintentional configuration @@ -71,7 +90,7 @@ type dnsConfig struct { NodeName string NodeTTL time.Duration OnlyPassing bool - RecursorStrategy structs.RecursorStrategy + RecursorStrategy agentdns.RecursorStrategy RecursorTimeout time.Duration Recursors []string SegmentName string @@ -245,28 +264,6 @@ func (d *DNSServer) ListenAndServe(network, addr string, notif func()) error { return d.Server.ListenAndServe() } -func (d *DNSServer) Shutdown() { - if d.Server != nil { - d.logger.Info("Stopping server", - "protocol", "DNS", - "address", d.Server.Addr, - "network", d.Server.Net, - ) - err := d.Server.Shutdown() - if err != nil { - d.logger.Error("Error stopping DNS server", "error", err) - } - } -} - -// GetAddr is a function to return the server address if is not nil. 
-func (d *DNSServer) GetAddr() string { - if d.Server != nil { - return d.Server.Addr - } - return "" -} - // toggleRecursorHandlerFromConfig enables or disables the recursor handler based on config idempotently func (d *DNSServer) toggleRecursorHandlerFromConfig(cfg *dnsConfig) { shouldEnable := len(cfg.Recursors) > 0 @@ -387,17 +384,8 @@ func (d *DNSServer) getResponseDomain(questionName string) string { func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { q := req.Question[0] defer func(s time.Time) { - // V1 DNS-style metrics metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s, []metrics.Label{{Name: "node", Value: d.agent.config.NodeName}}) - - // V2 DNS-style metrics for forward compatibility - metrics.MeasureSinceWithLabels([]string{"dns", "query"}, s, - []metrics.Label{ - {Name: "node", Value: d.agent.config.NodeName}, - {Name: "type", Value: dns.Type(dns.TypePTR).String()}, - }) - d.logger.Debug("request served from client", "question", q, "latency", time.Since(s).String(), @@ -413,12 +401,11 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { m.SetReply(req) m.Compress = !cfg.DisableCompression m.Authoritative = true - recursionAvailable := atomic.LoadUint32(&(d.recursorEnabled)) == 1 - m.RecursionAvailable = recursionAvailable + m.RecursionAvailable = (len(cfg.Recursors) > 0) // Only add the SOA if requested if req.Question[0].Qtype == dns.TypeSOA { - d.addSOAToMessage(cfg, m, q.Name) + d.addSOA(cfg, m, q.Name) } datacenter := d.agent.config.Datacenter @@ -429,7 +416,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { args := structs.DCSpecificRequest{ Datacenter: datacenter, QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), AllowStale: cfg.AllowStale, }, } @@ -461,15 +448,11 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { // only look into the services if we didn't find a node if len(m.Answer) == 0 { // lookup the service address - ip := libdns.IPFromARPA(qName) - var serviceAddress string - if ip != nil { - serviceAddress = ip.String() - } + serviceAddress := dnsutil.ExtractAddressFromReverse(qName) sargs := structs.ServiceSpecificRequest{ Datacenter: datacenter, QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), AllowStale: cfg.AllowStale, }, ServiceAddress: serviceAddress, @@ -493,13 +476,8 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { // nothing found locally, recurse if len(m.Answer) == 0 { - if recursionAvailable { - d.handleRecurse(resp, req) - return - } else { - m.SetRcode(req, dns.RcodeNameError) - d.addSOAToMessage(cfg, m, q.Name) - } + d.handleRecurse(resp, req) + return } // ptr record responses are globally valid @@ -515,21 +493,12 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) { q := req.Question[0] defer func(s time.Time) { - // V1 DNS-style metrics metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s, []metrics.Label{{Name: "node", Value: d.agent.config.NodeName}}) - - // V2 DNS-style metrics for forward compatibility - metrics.MeasureSinceWithLabels([]string{"dns", "query"}, s, - []metrics.Label{ - {Name: "node", Value: d.agent.config.NodeName}, - {Name: "type", Value: dns.Type(q.Qtype).String()}, - }) - d.logger.Debug("request served from client", "name", q.Name, - "type", dns.Type(q.Qtype).String(), - "class", 
dns.Class(q.Qclass).String(), + "type", dns.Type(q.Qtype), + "class", dns.Class(q.Qclass), "latency", time.Since(s).String(), "client", resp.RemoteAddr().String(), "client_network", resp.RemoteAddr().Network(), @@ -544,7 +513,7 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) { cfg := d.config.Load().(*dnsConfig) - // Set up the message response + // Setup the message response m := new(dns.Msg) m.SetReply(req) m.Compress = !cfg.DisableCompression @@ -555,14 +524,14 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) { switch req.Question[0].Qtype { case dns.TypeSOA: - ns, glue := d.getNameserversAndNodeRecord(req.Question[0].Name, cfg, maxRecursionLevelDefault) - m.Answer = append(m.Answer, d.makeSOARecord(cfg, q.Name)) + ns, glue := d.nameservers(req.Question[0].Name, cfg, maxRecursionLevelDefault) + m.Answer = append(m.Answer, d.soa(cfg, q.Name)) m.Ns = append(m.Ns, ns...) m.Extra = append(m.Extra, glue...) m.SetRcode(req, dns.RcodeSuccess) case dns.TypeNS: - ns, glue := d.getNameserversAndNodeRecord(req.Question[0].Name, cfg, maxRecursionLevelDefault) + ns, glue := d.nameservers(req.Question[0].Name, cfg, maxRecursionLevelDefault) m.Answer = ns m.Extra = glue m.SetRcode(req, dns.RcodeSuccess) @@ -574,7 +543,7 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) { err = d.dispatch(resp.RemoteAddr(), req, m, maxRecursionLevelDefault) rCode := rCodeFromError(err) if rCode == dns.RcodeNameError || errors.Is(err, errNoData) { - d.addSOAToMessage(cfg, m, q.Name) + d.addSOA(cfg, m, q.Name) } m.SetRcode(req, rCode) } @@ -588,8 +557,7 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) { } } -// Craft dns records for an SOA -func (d *DNSServer) makeSOARecord(cfg *dnsConfig, questionName string) *dns.SOA { +func (d *DNSServer) soa(cfg *dnsConfig, questionName string) *dns.SOA { domain := d.domain if d.altDomain != "" && strings.HasSuffix(questionName, "."+d.altDomain) { domain = d.altDomain @@ -614,13 +582,14 @@ func (d *DNSServer) makeSOARecord(cfg *dnsConfig, questionName string) *dns.SOA } // addSOA is used to add an SOA record to a message for the given domain -func (d *DNSServer) addSOAToMessage(cfg *dnsConfig, msg *dns.Msg, questionName string) { - msg.Ns = append(msg.Ns, d.makeSOARecord(cfg, questionName)) +func (d *DNSServer) addSOA(cfg *dnsConfig, msg *dns.Msg, questionName string) { + msg.Ns = append(msg.Ns, d.soa(cfg, questionName)) } -// getNameserversAndNodeRecord returns the names and ip addresses of up to three random servers +// nameservers returns the names and ip addresses of up to three random servers // in the current cluster which serve as authoritative name servers for zone. 
-func (d *DNSServer) getNameserversAndNodeRecord(questionName string, cfg *dnsConfig, maxRecursionLevel int) (ns []dns.RR, extra []dns.RR) { + +func (d *DNSServer) nameservers(questionName string, cfg *dnsConfig, maxRecursionLevel int) (ns []dns.RR, extra []dns.RR) { out, err := d.lookupServiceNodes(cfg, serviceLookup{ Datacenter: d.agent.config.Datacenter, Service: structs.ConsulServiceName, @@ -644,7 +613,7 @@ func (d *DNSServer) getNameserversAndNodeRecord(questionName string, cfg *dnsCon for _, o := range out.Nodes { name, dc := o.Node.Node, o.Node.Datacenter - if libdns.InvalidNameRe.MatchString(name) { + if agentdns.InvalidNameRe.MatchString(name) { d.logger.Warn("Skipping invalid node for NS records", "node", name) continue } @@ -675,10 +644,12 @@ func (d *DNSServer) getNameserversAndNodeRecord(questionName string, cfg *dnsCon return } -// parseDatacenter will do the following: -// - if zero labels are passed, return true without modifying the datacenter parameter -// - if one label is passed, set the datacenter parameter to the label and return true -// - Otherwise it will return false without modifying the datacenter parameter +func (d *DNSServer) invalidQuery(req, resp *dns.Msg, cfg *dnsConfig, qName string) { + d.logger.Warn("QName invalid", "qname", qName) + d.addSOA(cfg, resp, qName) + resp.SetRcode(req, dns.RcodeNameError) +} + func (d *DNSServer) parseDatacenter(labels []string, datacenter *string) bool { switch len(labels) { case 1: @@ -853,7 +824,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi // tag[.tag].name.service.consul } - err = d.handleServiceQuery(cfg, lookup, req, resp) + err = d.serviceLookup(cfg, lookup, req, resp) // Return if we are error free right away, otherwise loop again if we can if err == nil { return nil @@ -884,7 +855,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi EnterpriseMeta: locality.EnterpriseMeta, } // name.connect.consul - return d.handleServiceQuery(cfg, lookup, req, resp) + return d.serviceLookup(cfg, lookup, req, resp) case "virtual": if len(queryParts) < 1 { @@ -904,7 +875,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi ServiceName: queryParts[len(queryParts)-1], EnterpriseMeta: locality.EnterpriseMeta, QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), }, } if args.PeerName == "" { @@ -951,7 +922,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi EnterpriseMeta: locality.EnterpriseMeta, } // name.ingress.consul - return d.handleServiceQuery(cfg, lookup, req, resp) + return d.serviceLookup(cfg, lookup, req, resp) case "node": if len(queryParts) < 1 { @@ -984,7 +955,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi lookup.Datacenter = "" } - return d.handleNodeQuery(cfg, lookup, req, resp) + return d.nodeLookup(cfg, lookup, req, resp) case "query": n := len(queryParts) @@ -1016,7 +987,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi query = strings.Join(queryParts, ".") } - err := d.handlePreparedQuery(cfg, datacenter, query, remoteAddr, req, resp, maxRecursionLevel) + err := d.preparedQueryLookup(cfg, datacenter, query, remoteAddr, req, resp, maxRecursionLevel) return ecsNotGlobalError{error: err} case "addr": @@ -1069,8 +1040,6 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi } else { resp.Answer = append(resp.Answer, aaaaRecord) } 
- default: - return invalid() } return nil default: @@ -1110,8 +1079,8 @@ func rCodeFromError(err error) int { } } -// handleNodeQuery is used to handle a node query -func (d *DNSServer) handleNodeQuery(cfg *dnsConfig, lookup nodeLookup, req, resp *dns.Msg) error { +// nodeLookup is used to handle a node query +func (d *DNSServer) nodeLookup(cfg *dnsConfig, lookup nodeLookup, req, resp *dns.Msg) error { // Only handle ANY, A, AAAA, and TXT type requests qType := req.Question[0].Qtype if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA && qType != dns.TypeTXT { @@ -1124,7 +1093,7 @@ func (d *DNSServer) handleNodeQuery(cfg *dnsConfig, lookup nodeLookup, req, resp PeerName: lookup.PeerName, Node: lookup.Node, QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), AllowStale: cfg.AllowStale, }, EnterpriseMeta: lookup.EnterpriseMeta, @@ -1155,14 +1124,12 @@ func (d *DNSServer) handleNodeQuery(cfg *dnsConfig, lookup nodeLookup, req, resp } if cfg.NodeMetaTXT || qType == dns.TypeTXT || qType == dns.TypeANY { - metas := d.makeTXTRecordFromNodeMeta(q.Name, n, cfg.NodeTTL) + metas := d.generateMeta(q.Name, n, cfg.NodeTTL) *metaTarget = append(*metaTarget, metas...) } return nil } -// lookupNode is used to look up a node in the Consul catalog within NodeServices. -// If the config is set to UseCache, it will get the record from the agent cache. func (d *DNSServer) lookupNode(cfg *dnsConfig, args *structs.NodeSpecificRequest) (*structs.IndexedNodeServices, error) { var out structs.IndexedNodeServices @@ -1443,8 +1410,7 @@ func (d *DNSServer) trimDNSResponse(cfg *dnsConfig, network string, req, resp *d } } -// lookupServiceNodes is used to look up a node in the Consul health catalog within ServiceNodes. -// If the config is set to UseCache, it will get the record from the agent cache. +// lookupServiceNodes returns nodes with a given service. 
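trimDNSResponse, referenced in the hunk header above, keeps UDP responses within the configured answer and size limits. A simplified standalone sketch of the trimming idea follows; it assumes a plain byte budget and ignores the EDNS size negotiation, answer-limit handling, and SRV extra-record pruning the real server performs.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// trimToUDPSize drops answers from the end of the response until the message
// fits the byte budget, marking it truncated when anything was removed.
func trimToUDPSize(resp *dns.Msg, maxSize int) {
	for len(resp.Answer) > 0 && resp.Len() > maxSize {
		resp.Answer = resp.Answer[:len(resp.Answer)-1]
		resp.Truncated = true
	}
}

func main() {
	req := new(dns.Msg)
	req.SetQuestion("web.service.consul.", dns.TypeA)

	resp := new(dns.Msg)
	resp.SetReply(req)
	for i := 0; i < 50; i++ {
		rr, _ := dns.NewRR(fmt.Sprintf("web.service.consul. 10 IN A 10.0.0.%d", i+1))
		resp.Answer = append(resp.Answer, rr)
	}

	trimToUDPSize(resp, 512)
	fmt.Println(len(resp.Answer), resp.Truncated)
}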
func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, lookup serviceLookup) (structs.IndexedCheckServiceNodes, error) { serviceTags := []string{} if lookup.Tag != "" { @@ -1459,7 +1425,7 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, lookup serviceLookup) (st ServiceTags: serviceTags, TagFilter: lookup.Tag != "", QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), AllowStale: cfg.AllowStale, MaxAge: cfg.CacheMaxAge, UseCache: cfg.UseCache, @@ -1481,8 +1447,8 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, lookup serviceLookup) (st return out, nil } -// handleServiceQuery is used to handle a service query -func (d *DNSServer) handleServiceQuery(cfg *dnsConfig, lookup serviceLookup, req, resp *dns.Msg) error { +// serviceLookup is used to handle a service query +func (d *DNSServer) serviceLookup(cfg *dnsConfig, lookup serviceLookup, req, resp *dns.Msg) error { out, err := d.lookupServiceNodes(cfg, lookup) if err != nil { return fmt.Errorf("rpc request failed: %w", err) @@ -1502,9 +1468,9 @@ func (d *DNSServer) handleServiceQuery(cfg *dnsConfig, lookup serviceLookup, req // Add various responses depending on the request qType := req.Question[0].Qtype if qType == dns.TypeSRV { - d.addServiceSRVRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel) + d.serviceSRVRecords(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel) } else { - d.addServiceNodeRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel) + d.serviceNodeRecords(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel) } if len(resp.Answer) == 0 { @@ -1530,14 +1496,14 @@ func ednsSubnetForRequest(req *dns.Msg) *dns.EDNS0_SUBNET { return nil } -// handlePreparedQuery is used to handle a prepared query. -func (d *DNSServer) handlePreparedQuery(cfg *dnsConfig, datacenter, query string, remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) error { +// preparedQueryLookup is used to handle a prepared query. +func (d *DNSServer) preparedQueryLookup(cfg *dnsConfig, datacenter, query string, remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) error { // Execute the prepared query. args := structs.PreparedQueryExecuteRequest{ Datacenter: datacenter, QueryIDOrName: query, QueryOptions: structs.QueryOptions{ - Token: d.coalesceDNSToken(), + Token: d.agent.tokens.UserToken(), AllowStale: cfg.AllowStale, MaxAge: cfg.CacheMaxAge, }, @@ -1612,9 +1578,9 @@ func (d *DNSServer) handlePreparedQuery(cfg *dnsConfig, datacenter, query string // because peering is not supported with prepared queries. lookup := serviceLookup{Datacenter: out.Datacenter} if qType == dns.TypeSRV { - d.addServiceSRVRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel) + d.serviceSRVRecords(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel) } else { - d.addServiceNodeRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel) + d.serviceNodeRecords(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel) } if len(resp.Answer) == 0 { @@ -1623,8 +1589,6 @@ func (d *DNSServer) handlePreparedQuery(cfg *dnsConfig, datacenter, query string return nil } -// lookupPreparedQuery is used to execute a PreparedQuery against the Consul catalog. -// If the config is set to UseCache, it will use agent cache. 
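ednsSubnetForRequest, whose signature appears just above, pulls the EDNS0 client-subnet option out of the incoming request so the prepared-query path can forward the caller's subnet. A small self-contained example of reading that option with miekg/dns; the query name and subnet values are made up for illustration.

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

// clientSubnet returns the EDNS0 client-subnet option from a request, if any.
func clientSubnet(req *dns.Msg) *dns.EDNS0_SUBNET {
	opt := req.IsEdns0()
	if opt == nil {
		return nil
	}
	for _, o := range opt.Option {
		if subnet, ok := o.(*dns.EDNS0_SUBNET); ok {
			return subnet
		}
	}
	return nil
}

func main() {
	req := new(dns.Msg)
	req.SetQuestion("db.service.consul.", dns.TypeSRV)

	// Attach a client-subnet option the way a forwarding resolver might.
	opt := new(dns.OPT)
	opt.Hdr.Name = "."
	opt.Hdr.Rrtype = dns.TypeOPT
	opt.Option = append(opt.Option, &dns.EDNS0_SUBNET{
		Code:          dns.EDNS0SUBNET,
		Family:        1, // IPv4
		SourceNetmask: 24,
		Address:       net.ParseIP("198.51.100.0").To4(),
	})
	req.Extra = append(req.Extra, opt)

	fmt.Println(clientSubnet(req))
}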
func (d *DNSServer) lookupPreparedQuery(cfg *dnsConfig, args structs.PreparedQueryExecuteRequest) (*structs.PreparedQueryExecuteResponse, error) { var out structs.PreparedQueryExecuteResponse @@ -1666,8 +1630,8 @@ RPC: return &out, nil } -// addServiceNodeRecordsToMessage is used to add the node records for a service lookup -func (d *DNSServer) addServiceNodeRecordsToMessage(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) { +// serviceNodeRecords is used to add the node records for a service lookup +func (d *DNSServer) serviceNodeRecords(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) { handled := make(map[string]struct{}) var answerCNAME []dns.RR = nil @@ -1675,7 +1639,7 @@ func (d *DNSServer) addServiceNodeRecordsToMessage(cfg *dnsConfig, lookup servic for _, node := range nodes { // Add the node record had_answer := false - records, _ := d.makeNodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel) + records, _ := d.nodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel) if len(records) == 0 { continue } @@ -1774,7 +1738,6 @@ func (d *DNSServer) encodeIPAsFqdn(questionName string, lookup serviceLookup, ip return fmt.Sprintf("%s.addr.%s.%s", ipStr, lookup.Datacenter, respDomain) } -// Craft dns records for a an A record for an IP address func makeARecord(qType uint16, ip net.IP, ttl time.Duration) dns.RR { var ipRecord dns.RR @@ -1807,13 +1770,13 @@ func makeARecord(qType uint16, ip net.IP, ttl time.Duration) dns.RR { // In case of an SRV query the answer will be a IN SRV and additional data will store an IN A to the node IP // Otherwise it will return a IN A record func (d *DNSServer) makeRecordFromNode(node *structs.Node, qType uint16, qName string, ttl time.Duration, maxRecursionLevel int) []dns.RR { - addrTranslate := dnsutil.TranslateAddressAcceptDomain + addrTranslate := TranslateAddressAcceptDomain if qType == dns.TypeA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv4 + addrTranslate |= TranslateAddressAcceptIPv4 } else if qType == dns.TypeAAAA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv6 + addrTranslate |= TranslateAddressAcceptIPv6 } else { - addrTranslate |= dnsutil.TranslateAddressAcceptAny + addrTranslate |= TranslateAddressAcceptAny } addr := d.agent.TranslateAddress(node.Datacenter, node.Address, node.TaggedAddresses, addrTranslate) @@ -1977,15 +1940,14 @@ MORE_REC: return answers, nil } -// Craft dns records from a CheckServiceNode struct -func (d *DNSServer) makeNodeServiceRecords(lookup serviceLookup, node structs.CheckServiceNode, req *dns.Msg, ttl time.Duration, cfg *dnsConfig, maxRecursionLevel int) ([]dns.RR, []dns.RR) { - addrTranslate := dnsutil.TranslateAddressAcceptDomain +func (d *DNSServer) nodeServiceRecords(lookup serviceLookup, node structs.CheckServiceNode, req *dns.Msg, ttl time.Duration, cfg *dnsConfig, maxRecursionLevel int) ([]dns.RR, []dns.RR) { + addrTranslate := TranslateAddressAcceptDomain if req.Question[0].Qtype == dns.TypeA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv4 + addrTranslate |= TranslateAddressAcceptIPv4 } else if req.Question[0].Qtype == dns.TypeAAAA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv6 + addrTranslate |= TranslateAddressAcceptIPv6 } else { - addrTranslate |= dnsutil.TranslateAddressAcceptAny + addrTranslate |= TranslateAddressAcceptAny } // The datacenter should be empty during translation if it is a 
peering lookup. @@ -2029,8 +1991,7 @@ func (d *DNSServer) makeNodeServiceRecords(lookup serviceLookup, node structs.Ch return d.makeRecordFromFQDN(lookup, serviceAddr, node, req, ttl, cfg, maxRecursionLevel) } -// Craft dns records for TXT from a node's metadata -func (d *DNSServer) makeTXTRecordFromNodeMeta(qName string, node *structs.Node, ttl time.Duration) []dns.RR { +func (d *DNSServer) generateMeta(qName string, node *structs.Node, ttl time.Duration) []dns.RR { extra := make([]dns.RR, 0, len(node.Meta)) for key, value := range node.Meta { txt := value @@ -2051,8 +2012,8 @@ func (d *DNSServer) makeTXTRecordFromNodeMeta(qName string, node *structs.Node, return extra } -// addServiceSRVRecordsToMessage is used to add the SRV records for a service lookup -func (d *DNSServer) addServiceSRVRecordsToMessage(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) { +// serviceARecords is used to add the SRV records for a service lookup +func (d *DNSServer) serviceSRVRecords(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) { handled := make(map[string]struct{}) for _, node := range nodes { @@ -2061,7 +2022,7 @@ func (d *DNSServer) addServiceSRVRecordsToMessage(cfg *dnsConfig, lookup service // The datacenter should be empty during translation if it is a peering lookup. // This should be fine because we should always prefer the WAN address. - serviceAddress := d.agent.TranslateServiceAddress(lookup.Datacenter, node.Service.Address, node.Service.TaggedAddresses, dnsutil.TranslateAddressAcceptAny) + serviceAddress := d.agent.TranslateServiceAddress(lookup.Datacenter, node.Service.Address, node.Service.TaggedAddresses, TranslateAddressAcceptAny) servicePort := d.agent.TranslateServicePort(lookup.Datacenter, node.Service.Port, node.Service.TaggedAddresses) tuple := fmt.Sprintf("%s:%s:%d", node.Node.Node, serviceAddress, servicePort) if _, ok := handled[tuple]; ok { @@ -2069,14 +2030,14 @@ func (d *DNSServer) addServiceSRVRecordsToMessage(cfg *dnsConfig, lookup service } handled[tuple] = struct{}{} - answers, extra := d.makeNodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel) + answers, extra := d.nodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel) respDomain := d.getResponseDomain(req.Question[0].Name) resp.Answer = append(resp.Answer, answers...) resp.Extra = append(resp.Extra, extra...) if cfg.NodeMetaTXT { - resp.Extra = append(resp.Extra, d.makeTXTRecordFromNodeMeta(nodeCanonicalDNSName(lookup, node.Node.Node, respDomain), node.Node, ttl)...) + resp.Extra = append(resp.Extra, d.generateMeta(nodeCanonicalDNSName(lookup, node.Node.Node, respDomain), node.Node, ttl)...) } } } @@ -2211,11 +2172,3 @@ func (d *DNSServer) resolveCNAME(cfg *dnsConfig, name string, maxRecursionLevel d.logger.Error("all resolvers failed for name", "name", name) return nil } - -func (d *DNSServer) coalesceDNSToken() string { - if d.agent.tokens.DNSToken() != "" { - return d.agent.tokens.DNSToken() - } else { - return d.agent.tokens.UserToken() - } -} diff --git a/agent/structs/dns.go b/agent/dns/dns.go similarity index 54% rename from agent/structs/dns.go rename to agent/dns/dns.go index eea42e8d37c72..9f8e785a390b6 100644 --- a/agent/structs/dns.go +++ b/agent/dns/dns.go @@ -1,9 +1,19 @@ // Copyright (c) HashiCorp, Inc. 
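generateMeta, shown a few hunks up, turns node metadata into TXT records for node and SRV responses. A standalone sketch of that behavior follows; it assumes the usual key=value rendering and that keys prefixed with "rfc1035-" carry only the raw value, and the helper name here is illustrative rather than the patched function itself.

package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/miekg/dns"
)

// nodeMetaTXT builds one TXT record per node-meta entry.
func nodeMetaTXT(qName string, meta map[string]string, ttl time.Duration) []dns.RR {
	extra := make([]dns.RR, 0, len(meta))
	for key, value := range meta {
		txt := value
		if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") {
			txt = fmt.Sprintf("%s=%s", key, value)
		}
		extra = append(extra, &dns.TXT{
			Hdr: dns.RR_Header{
				Name:   qName,
				Rrtype: dns.TypeTXT,
				Class:  dns.ClassINET,
				Ttl:    uint32(ttl / time.Second),
			},
			Txt: []string{txt},
		})
	}
	return extra
}

func main() {
	rrs := nodeMetaTXT("node1.node.dc1.consul.", map[string]string{"rack": "r1"}, 30*time.Second)
	fmt.Println(rrs[0])
}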
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package structs +package dns -import "math/rand" +import ( + "math/rand" + "regexp" +) + +// MaxLabelLength is the maximum length for a name that can be used in DNS. +const MaxLabelLength = 63 + +// InvalidNameRe is a regex that matches characters which can not be included in +// a DNS name. +var InvalidNameRe = regexp.MustCompile(`[^A-Za-z0-9\\-]+`) type RecursorStrategy string diff --git a/agent/dns/dns_address.go b/agent/dns/dns_address.go deleted file mode 100644 index e1e61f689f78f..0000000000000 --- a/agent/dns/dns_address.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 -package dns - -import ( - "github.com/miekg/dns" - "net" - "strings" -) - -func newDNSAddress(addr string) *dnsAddress { - a := &dnsAddress{} - a.SetAddress(addr) - return a -} - -// dnsAddress is a wrapper around a string that represents a DNS address and -// provides helper methods for determining whether it is an IP or FQDN and -// whether it is internal or external to the domain. -type dnsAddress struct { - addr string - - // store an IP so helpers don't have to parse it multiple times - ip net.IP -} - -// SetAddress sets the address field and the ip field if the string is an IP. -func (a *dnsAddress) SetAddress(addr string) { - a.addr = addr - a.ip = net.ParseIP(addr) -} - -// IP returns the IP address if the address is an IP. -func (a *dnsAddress) IP() net.IP { - return a.ip -} - -// IsIP returns true if the address is an IP. -func (a *dnsAddress) IsIP() bool { - return a.IP() != nil -} - -// IsIPV4 returns true if the address is an IPv4 address. -func (a *dnsAddress) IsIPV4() bool { - if a.IP() == nil { - return false - } - return a.IP().To4() != nil -} - -// FQDN returns the FQDN if the address is not an IP. -func (a *dnsAddress) FQDN() string { - if !a.IsEmptyString() && !a.IsIP() { - return dns.Fqdn(a.addr) - } - return "" -} - -// IsFQDN returns true if the address is a FQDN and not an IP. -func (a *dnsAddress) IsFQDN() bool { - return !a.IsEmptyString() && !a.IsIP() && dns.IsFqdn(a.FQDN()) -} - -// String returns the address as a string. -func (a *dnsAddress) String() string { - return a.addr -} - -// IsEmptyString returns true if the address is an empty string. -func (a *dnsAddress) IsEmptyString() bool { - return a.addr == "" -} - -// IsInternalFQDN returns true if the address is a FQDN and is internal to the domain. -func (a *dnsAddress) IsInternalFQDN(domain string) bool { - return !a.IsIP() && a.IsFQDN() && strings.HasSuffix(a.FQDN(), domain) -} - -// IsInternalFQDNOrIP returns true if the address is an IP or a FQDN and is internal to the domain. -func (a *dnsAddress) IsInternalFQDNOrIP(domain string) bool { - return a.IsIP() || a.IsInternalFQDN(domain) -} - -// IsExternalFQDN returns true if the address is a FQDN and is external to the domain. -func (a *dnsAddress) IsExternalFQDN(domain string) bool { - return !a.IsIP() && a.IsFQDN() && strings.Count(a.FQDN(), ".") > 1 && !strings.HasSuffix(a.FQDN(), domain) -} diff --git a/agent/dns/dns_address_test.go b/agent/dns/dns_address_test.go deleted file mode 100644 index 93460437f2d9e..0000000000000 --- a/agent/dns/dns_address_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 -package dns - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func Test_dnsAddress(t *testing.T) { - const domain = "consul." 
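The MaxLabelLength constant and InvalidNameRe pattern added to agent/dns/dns.go above are what callers such as the NS-record path use to reject node names that cannot appear in DNS. A minimal validation sketch using the same limits; the helper name is invented for the example.

package main

import (
	"fmt"
	"regexp"
)

// Mirrors the values introduced in agent/dns/dns.go.
const maxLabelLength = 63

var invalidNameRe = regexp.MustCompile(`[^A-Za-z0-9\\-]+`)

// validNodeLabel reports whether a node name can be used as a single DNS
// label: non-empty, at most 63 octets, and free of invalid characters.
func validNodeLabel(name string) bool {
	return name != "" && len(name) <= maxLabelLength && !invalidNameRe.MatchString(name)
}

func main() {
	for _, name := range []string{"node-1", "node_1", "web.internal"} {
		fmt.Println(name, validNodeLabel(name))
	}
}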
- type expectedResults struct { - isIp bool - stringResult string - fqdn string - isFQDN bool - isEmptyString bool - isExternalFQDN bool - isInternalFQDN bool - isInternalFQDNOrIP bool - } - type testCase struct { - name string - input string - expectedResults expectedResults - } - testCases := []testCase{ - { - name: "empty string", - input: "", - expectedResults: expectedResults{ - isIp: false, - stringResult: "", - fqdn: "", - isFQDN: false, - isEmptyString: true, - isExternalFQDN: false, - isInternalFQDN: false, - isInternalFQDNOrIP: false, - }, - }, - { - name: "ipv4 address", - input: "127.0.0.1", - expectedResults: expectedResults{ - isIp: true, - stringResult: "127.0.0.1", - fqdn: "", - isFQDN: false, - isEmptyString: false, - isExternalFQDN: false, - isInternalFQDN: false, - isInternalFQDNOrIP: true, - }, - }, - { - name: "ipv6 address", - input: "2001:db8:1:2:cafe::1337", - expectedResults: expectedResults{ - isIp: true, - stringResult: "2001:db8:1:2:cafe::1337", - fqdn: "", - isFQDN: false, - isEmptyString: false, - isExternalFQDN: false, - isInternalFQDN: false, - isInternalFQDNOrIP: true, - }, - }, - { - name: "internal FQDN without trailing period", - input: "web.service.consul", - expectedResults: expectedResults{ - isIp: false, - stringResult: "web.service.consul", - fqdn: "web.service.consul.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: false, - isInternalFQDN: true, - isInternalFQDNOrIP: true, - }, - }, - { - name: "internal FQDN with period", - input: "web.service.consul.", - expectedResults: expectedResults{ - isIp: false, - stringResult: "web.service.consul.", - fqdn: "web.service.consul.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: false, - isInternalFQDN: true, - isInternalFQDNOrIP: true, - }, - }, - { - name: "server name", - input: "web.", - expectedResults: expectedResults{ - isIp: false, - stringResult: "web.", - fqdn: "web.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: false, - isInternalFQDN: false, - isInternalFQDNOrIP: false, - }, - }, - { - name: "external FQDN without trailing period", - input: "web.service.vault", - expectedResults: expectedResults{ - isIp: false, - stringResult: "web.service.vault", - fqdn: "web.service.vault.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: true, - isInternalFQDN: false, - isInternalFQDNOrIP: false, - }, - }, - { - name: "external FQDN with trailing period", - input: "web.service.vault.", - expectedResults: expectedResults{ - isIp: false, - stringResult: "web.service.vault.", - fqdn: "web.service.vault.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: true, - isInternalFQDN: false, - isInternalFQDNOrIP: false, - }, - }, - { - name: "another external FQDN", - input: "www.google.com", - expectedResults: expectedResults{ - isIp: false, - stringResult: "www.google.com", - fqdn: "www.google.com.", - isFQDN: true, - isEmptyString: false, - isExternalFQDN: true, - isInternalFQDN: false, - isInternalFQDNOrIP: false, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - dnsAddress := newDNSAddress(tc.input) - assert.Equal(t, tc.expectedResults.isIp, dnsAddress.IsIP()) - assert.Equal(t, tc.expectedResults.stringResult, dnsAddress.String()) - assert.Equal(t, tc.expectedResults.isFQDN, dnsAddress.IsFQDN()) - assert.Equal(t, tc.expectedResults.isEmptyString, dnsAddress.IsEmptyString()) - assert.Equal(t, tc.expectedResults.isExternalFQDN, dnsAddress.IsExternalFQDN(domain)) - assert.Equal(t, tc.expectedResults.isInternalFQDN, 
dnsAddress.IsInternalFQDN(domain)) - assert.Equal(t, tc.expectedResults.isInternalFQDNOrIP, dnsAddress.IsInternalFQDNOrIP(domain)) - }) - } -} diff --git a/agent/structs/dns_test.go b/agent/dns/dns_test.go similarity index 96% rename from agent/structs/dns_test.go rename to agent/dns/dns_test.go index 908473d2095c4..91dc3ea72a919 100644 --- a/agent/structs/dns_test.go +++ b/agent/dns/dns_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package structs +package dns import ( "testing" diff --git a/agent/dns/mock_DNSRouter.go b/agent/dns/mock_DNSRouter.go deleted file mode 100644 index 788c894f588ce..0000000000000 --- a/agent/dns/mock_DNSRouter.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by mockery v2.37.1. DO NOT EDIT. - -package dns - -import ( - config "github.com/hashicorp/consul/agent/config" - miekgdns "github.com/miekg/dns" - - mock "github.com/stretchr/testify/mock" - - net "net" -) - -// MockDNSRouter is an autogenerated mock type for the DNSRouter type -type MockDNSRouter struct { - mock.Mock -} - -// HandleRequest provides a mock function with given fields: req, reqCtx, remoteAddress -func (_m *MockDNSRouter) HandleRequest(req *miekgdns.Msg, reqCtx Context, remoteAddress net.Addr) *miekgdns.Msg { - ret := _m.Called(req, reqCtx, remoteAddress) - - var r0 *miekgdns.Msg - if rf, ok := ret.Get(0).(func(*miekgdns.Msg, Context, net.Addr) *miekgdns.Msg); ok { - r0 = rf(req, reqCtx, remoteAddress) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*miekgdns.Msg) - } - } - - return r0 -} - -// ReloadConfig provides a mock function with given fields: newCfg -func (_m *MockDNSRouter) ReloadConfig(newCfg *config.RuntimeConfig) error { - ret := _m.Called(newCfg) - - var r0 error - if rf, ok := ret.Get(0).(func(*config.RuntimeConfig) error); ok { - r0 = rf(newCfg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ServeDNS provides a mock function with given fields: w, req -func (_m *MockDNSRouter) ServeDNS(w miekgdns.ResponseWriter, req *miekgdns.Msg) { - _m.Called(w, req) -} - -// NewMockDNSRouter creates a new instance of MockDNSRouter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockDNSRouter(t interface { - mock.TestingT - Cleanup(func()) -}) *MockDNSRouter { - mock := &MockDNSRouter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/dns/mock_dnsRecursor.go b/agent/dns/mock_dnsRecursor.go deleted file mode 100644 index b590661da1cca..0000000000000 --- a/agent/dns/mock_dnsRecursor.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by mockery v2.32.4. DO NOT EDIT. 
- -package dns - -import ( - miekgdns "github.com/miekg/dns" - mock "github.com/stretchr/testify/mock" - - net "net" -) - -// mockDnsRecursor is an autogenerated mock type for the dnsRecursor type -type mockDnsRecursor struct { - mock.Mock -} - -// handle provides a mock function with given fields: req, cfgCtx, remoteAddr -func (_m *mockDnsRecursor) handle(req *miekgdns.Msg, cfgCtx *RouterDynamicConfig, remoteAddr net.Addr) (*miekgdns.Msg, error) { - ret := _m.Called(req, cfgCtx, remoteAddr) - - var r0 *miekgdns.Msg - var r1 error - if rf, ok := ret.Get(0).(func(*miekgdns.Msg, *RouterDynamicConfig, net.Addr) (*miekgdns.Msg, error)); ok { - return rf(req, cfgCtx, remoteAddr) - } - if rf, ok := ret.Get(0).(func(*miekgdns.Msg, *RouterDynamicConfig, net.Addr) *miekgdns.Msg); ok { - r0 = rf(req, cfgCtx, remoteAddr) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*miekgdns.Msg) - } - } - - if rf, ok := ret.Get(1).(func(*miekgdns.Msg, *RouterDynamicConfig, net.Addr) error); ok { - r1 = rf(req, cfgCtx, remoteAddr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// newMockDnsRecursor creates a new instance of mockDnsRecursor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func newMockDnsRecursor(t interface { - mock.TestingT - Cleanup(func()) -}) *mockDnsRecursor { - mock := &mockDnsRecursor{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/dns/parser.go b/agent/dns/parser.go deleted file mode 100644 index 1a0f0a601d1fc..0000000000000 --- a/agent/dns/parser.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -// parsedLabels defines valid DNS labels that are possible for ALL DNS query in Consul. (v1 and v2, CE and ENT) -// It is the job of the parser to populate the struct, the routers to call the query processor, -// and the query processor to validate is the labels. -type parsedLabels struct { - Datacenter string - Namespace string - Partition string - Peer string - PeerOrDatacenter string // deprecated: use Datacenter or Peer - SamenessGroup string -} - -// ParseLabels can parse a DNS query's labels and returns a parsedLabels. -// It also does light validation according to invariants across all possible DNS queries for all Consul versions -func parseLabels(labels []string) (*parsedLabels, bool) { - var result parsedLabels - - switch len(labels) { - case 2, 4, 6: - // Supports the following formats: - // - [..ns][..ap][..dc] - // - . - // - [..ns][..ap][..peer] - // - [..sg][..ap][..ns] - for i := 0; i < len(labels); i += 2 { - switch labels[i+1] { - case "ns": - result.Namespace = labels[i] - case "ap": - result.Partition = labels[i] - case "dc", "cluster": - result.Datacenter = labels[i] - case "sg": - result.SamenessGroup = labels[i] - case "peer": - result.Peer = labels[i] - default: - // The only case in which labels[i+1] is allowed to be a value - // other than ns, ap, or dc is if n == 2 to support the format: - // .. - if len(labels) == 2 { - result.PeerOrDatacenter = labels[1] - result.Namespace = labels[0] - return &result, true - } - return nil, false - } - } - - // VALIDATIONS - // Return nil result and false boolean when both datacenter and peer are specified. 
- if result.Datacenter != "" && result.Peer != "" { - return nil, false - } - - // Validation e need to validate that this a valid DNS including sg - if result.SamenessGroup != "" && (result.Datacenter != "" || result.Peer != "") { - return nil, false - } - - return &result, true - - case 1: - result.PeerOrDatacenter = labels[0] - return &result, true - - case 0: - return &result, true - } - - return &result, false -} - -// parsePort looks through the query parts for a named port label. -// It assumes the only valid input format is["", "port", ""]. -// The other expected formats are [""] and ["", ""]. -// It is expected that the queryProcessor validates if the label is allowed for the query type. -func parsePort(parts []string) string { - // The minimum number of parts would be - if len(parts) != 3 || parts[1] != "port" { - return "" - } - return parts[0] -} diff --git a/agent/dns/recursor.go b/agent/dns/recursor.go deleted file mode 100644 index 21ea94a6c839e..0000000000000 --- a/agent/dns/recursor.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "errors" - "net" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/miekg/dns" - - "github.com/hashicorp/consul/ipaddr" - "github.com/hashicorp/consul/logging" -) - -type recursor struct { - logger hclog.Logger -} - -func newRecursor(logger hclog.Logger) *recursor { - return &recursor{ - logger: logger.Named(logging.DNS), - } -} - -// handle is used to process DNS queries for externally configured servers -func (r *recursor) handle(req *dns.Msg, cfgCtx *RouterDynamicConfig, remoteAddr net.Addr) (*dns.Msg, error) { - q := req.Question[0] - - network := "udp" - defer func(s time.Time) { - r.logger.Trace("request served from client", - "question", q, - "network", network, - "latency", time.Since(s).String(), - "client", remoteAddr.String(), - "client_network", remoteAddr.Network(), - ) - }(time.Now()) - - // Switch to TCP if the client is - if _, ok := remoteAddr.(*net.TCPAddr); ok { - network = "tcp" - } - - // Recursively resolve - c := &dns.Client{Net: network, Timeout: cfgCtx.RecursorTimeout} - var resp *dns.Msg - var rtt time.Duration - var err error - for _, idx := range cfgCtx.RecursorStrategy.Indexes(len(cfgCtx.Recursors)) { - recurseAddr := cfgCtx.Recursors[idx] - resp, rtt, err = c.Exchange(req, recurseAddr) - // Check if the response is valid and has the desired Response code - if resp != nil && (resp.Rcode != dns.RcodeSuccess && resp.Rcode != dns.RcodeNameError) { - r.logger.Trace("recurse failed for question", - "question", q, - "rtt", rtt, - "recursor", recurseAddr, - "rcode", dns.RcodeToString[resp.Rcode], - ) - // If we still have recursors to forward the query to, - // we move forward onto the next one else the loop ends - continue - } else if err == nil || (resp != nil && resp.Truncated) { - // Compress the response; we don't know if the incoming - // response was compressed or not, so by not compressing - // we might generate an invalid packet on the way out. 
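parsePort, from the removed parser.go above, only recognizes the three-label <port>.port.<service> form. A small usage example follows, reproducing that helper so the snippet runs on its own.

package main

import "fmt"

// parsePort mirrors the removed helper: it returns the named port only for
// the exact three-part form and an empty string otherwise.
func parsePort(parts []string) string {
	if len(parts) != 3 || parts[1] != "port" {
		return ""
	}
	return parts[0]
}

func main() {
	fmt.Println(parsePort([]string{"grpc", "port", "api"})) // "grpc"
	fmt.Println(parsePort([]string{"api"}))                 // "" (no named port)
	fmt.Println(parsePort([]string{"tag", "api"}))          // "" (tagged form, no port label)
}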
- resp.Compress = !cfgCtx.DisableCompression - - // Forward the response - r.logger.Trace("recurse succeeded for question", - "question", q, - "rtt", rtt, - "recursor", recurseAddr, - ) - return resp, nil - } - r.logger.Error("recurse failed", "error", err) - } - - // If all resolvers fail, return a SERVFAIL message - r.logger.Error("all resolvers failed for question from client", - "question", q, - "client", remoteAddr.String(), - "client_network", remoteAddr.Network(), - ) - - return nil, errRecursionFailed -} - -// formatRecursorAddress is used to add a port to the recursor if omitted. -func formatRecursorAddress(recursor string) (string, error) { - _, _, err := net.SplitHostPort(recursor) - var ae *net.AddrError - if errors.As(err, &ae) { - switch ae.Err { - case "missing port in address": - recursor = ipaddr.FormatAddressPort(recursor, 53) - case "too many colons in address": - if ip := net.ParseIP(recursor); ip != nil && ip.To4() == nil { - recursor = ipaddr.FormatAddressPort(recursor, 53) - break - } - fallthrough - default: - return "", err - } - } else if err != nil { - return "", err - } - - // Get the address - addr, err := net.ResolveTCPAddr("tcp", recursor) - if err != nil { - return "", err - } - - // Return string - return addr.String(), nil -} diff --git a/agent/dns/recursor_test.go b/agent/dns/recursor_test.go deleted file mode 100644 index 69514e508e7bc..0000000000000 --- a/agent/dns/recursor_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "strings" - "testing" -) - -// Test_handle cases are covered by the integration tests in agent/dns_test.go. -// They should be moved here when the V1 DNS server is deprecated. -//func Test_handle(t *testing.T) { - -func Test_formatRecursorAddress(t *testing.T) { - t.Parallel() - addr, err := formatRecursorAddress("8.8.8.8") - if err != nil { - t.Fatalf("err: %v", err) - } - if addr != "8.8.8.8:53" { - t.Fatalf("bad: %v", addr) - } - addr, err = formatRecursorAddress("2001:4860:4860::8888") - if err != nil { - t.Fatalf("err: %v", err) - } - if addr != "[2001:4860:4860::8888]:53" { - t.Fatalf("bad: %v", addr) - } - _, err = formatRecursorAddress("1.2.3.4::53") - if err == nil || !strings.Contains(err.Error(), "too many colons in address") { - t.Fatalf("err: %v", err) - } - _, err = formatRecursorAddress("2001:4860:4860::8888:::53") - if err == nil || !strings.Contains(err.Error(), "too many colons in address") { - t.Fatalf("err: %v", err) - } -} diff --git a/agent/dns/router.go b/agent/dns/router.go deleted file mode 100644 index 267c4bd6fe722..0000000000000 --- a/agent/dns/router.go +++ /dev/null @@ -1,1458 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "encoding/hex" - "errors" - "fmt" - "net" - "regexp" - "strings" - "sync/atomic" - "time" - - "github.com/armon/go-metrics" - "github.com/armon/go-radix" - "github.com/miekg/dns" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/discovery" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/dnsutil" - "github.com/hashicorp/consul/logging" -) - -const ( - addrLabel = "addr" - - arpaDomain = "arpa." - arpaLabel = "arpa" - - suffixFailover = "failover." - suffixNoFailover = "no-failover." 
- maxRecursionLevelDefault = 3 // This field comes from the V1 DNS server and affects V1 catalog lookups - maxRecurseRecords = 5 -) - -var ( - errInvalidQuestion = fmt.Errorf("invalid question") - errNameNotFound = fmt.Errorf("name not found") - errNotImplemented = fmt.Errorf("not implemented") - errRecursionFailed = fmt.Errorf("recursion failed") - - trailingSpacesRE = regexp.MustCompile(" +$") -) - -// Context is used augment a DNS message with Consul-specific metadata. -type Context struct { - Token string - DefaultPartition string - DefaultDatacenter string -} - -// RouterDynamicConfig is the dynamic configuration that can be hot-reloaded -type RouterDynamicConfig struct { - ARecordLimit int - DisableCompression bool - EnableTruncate bool - NodeMetaTXT bool - NodeTTL time.Duration - Recursors []string - RecursorTimeout time.Duration - RecursorStrategy structs.RecursorStrategy - SOAConfig SOAConfig - // TTLRadix sets service TTLs by prefix, eg: "database-*" - TTLRadix *radix.Tree - // TTLStrict sets TTLs to service by full name match. It Has higher priority than TTLRadix - TTLStrict map[string]time.Duration - UDPAnswerLimit int -} - -type SOAConfig struct { - Refresh uint32 // 3600 by default - Retry uint32 // 600 - Expire uint32 // 86400 - Minttl uint32 // 0 -} - -// DiscoveryQueryProcessor is an interface that can be used by any consumer requesting Service Discovery results. -// This could be attached to a gRPC endpoint in the future in addition to DNS. -// Making this an interface means testing the router with a mock is trivial. -type DiscoveryQueryProcessor interface { - QueryByName(*discovery.Query, discovery.Context) ([]*discovery.Result, error) - QueryByIP(net.IP, discovery.Context) ([]*discovery.Result, error) -} - -// dnsRecursor is an interface that can be used to mock calls to external DNS servers for unit testing. -// -//go:generate mockery --name dnsRecursor --inpackage -type dnsRecursor interface { - handle(req *dns.Msg, cfgCtx *RouterDynamicConfig, remoteAddress net.Addr) (*dns.Msg, error) -} - -// Router replaces miekg/dns.ServeMux with a simpler router that only checks for the 2-3 valid domains -// that Consul supports and forwards to a single DiscoveryQueryProcessor handler. If there is no match, it will recurse. -type Router struct { - processor DiscoveryQueryProcessor - recursor dnsRecursor - domain string - altDomain string - datacenter string - nodeName string - logger hclog.Logger - - tokenFunc func() string - translateAddressFunc func(dc string, addr string, taggedAddresses map[string]string, accept dnsutil.TranslateAddressAccept) string - translateServiceAddressFunc func(dc string, address string, taggedAddresses map[string]structs.ServiceAddress, accept dnsutil.TranslateAddressAccept) string - - // dynamicConfig stores the config as an atomic value (for hot-reloading). 
- // It is always of type *RouterDynamicConfig - dynamicConfig atomic.Value -} - -var _ = dns.Handler(&Router{}) -var _ = DNSRouter(&Router{}) - -func NewRouter(cfg Config) (*Router, error) { - // Make sure domains are FQDN, make them case-insensitive for DNSRequestRouter - domain := dns.CanonicalName(cfg.AgentConfig.DNSDomain) - altDomain := dns.CanonicalName(cfg.AgentConfig.DNSAltDomain) - - logger := cfg.Logger.Named(logging.DNS) - - router := &Router{ - processor: cfg.Processor, - recursor: newRecursor(logger), - domain: domain, - altDomain: altDomain, - datacenter: cfg.AgentConfig.Datacenter, - logger: logger, - nodeName: cfg.AgentConfig.NodeName, - tokenFunc: cfg.TokenFunc, - translateAddressFunc: cfg.TranslateAddressFunc, - translateServiceAddressFunc: cfg.TranslateServiceAddressFunc, - } - - if err := router.ReloadConfig(cfg.AgentConfig); err != nil { - return nil, err - } - return router, nil -} - -// HandleRequest is used to process an individual DNS request. It returns a message in success or fail cases. -func (r *Router) HandleRequest(req *dns.Msg, reqCtx Context, remoteAddress net.Addr) *dns.Msg { - configCtx := r.dynamicConfig.Load().(*RouterDynamicConfig) - - r.logger.Trace("received request", "question", req.Question[0].Name, "type", dns.Type(req.Question[0].Qtype).String()) - - err := validateAndNormalizeRequest(req) - if err != nil { - r.logger.Error("error parsing DNS query", "error", err) - if errors.Is(err, errInvalidQuestion) { - return createRefusedResponse(req) - } - return createServerFailureResponse(req, configCtx, false) - } - - defer func(s time.Time, q dns.Question) { - metrics.MeasureSinceWithLabels([]string{"dns", "query"}, s, - []metrics.Label{ - {Name: "node", Value: r.nodeName}, - {Name: "type", Value: dns.Type(q.Qtype).String()}, - }) - - r.logger.Trace("request served from client", - "name", q.Name, - "type", dns.Type(q.Qtype).String(), - "class", dns.Class(q.Qclass).String(), - "latency", time.Since(s).String(), - "client", remoteAddress.String(), - "client_network", remoteAddress.Network(), - ) - }(time.Now(), req.Question[0]) - - return r.handleRequestRecursively(req, reqCtx, configCtx, remoteAddress, maxRecursionLevelDefault) -} - -// getErrorFromECSNotGlobalError returns the underlying error from an ECSNotGlobalError, if it exists. -func getErrorFromECSNotGlobalError(err error) error { - if errors.Is(err, discovery.ErrECSNotGlobal) { - return err.(discovery.ECSNotGlobalError).Unwrap() - } - return err -} - -// handleRequestRecursively is used to process an individual DNS request. It will recurse as needed -// a maximum number of times and returns a message in success or fail cases. 
-func (r *Router) handleRequestRecursively(req *dns.Msg, reqCtx Context, configCtx *RouterDynamicConfig, - remoteAddress net.Addr, maxRecursionLevel int) *dns.Msg { - - r.logger.Trace( - "received request", - "question", req.Question[0].Name, - "type", dns.Type(req.Question[0].Qtype).String(), - "recursion_remaining", maxRecursionLevel) - - responseDomain, needRecurse := r.parseDomain(req.Question[0].Name) - if needRecurse && !canRecurse(configCtx) { - // This is the same error as an unmatched domain - return createRefusedResponse(req) - } - - if needRecurse { - r.logger.Trace("checking recursors to handle request", "question", req.Question[0].Name, "type", dns.Type(req.Question[0].Qtype).String()) - - // This assumes `canRecurse(configCtx)` is true above - resp, err := r.recursor.handle(req, configCtx, remoteAddress) - if err != nil && !errors.Is(err, errRecursionFailed) { - r.logger.Error("unhandled error recursing DNS query", "error", err) - } - if err != nil { - return createServerFailureResponse(req, configCtx, true) - } - return resp - } - - // Need to pass the question name to properly support recursion and the - // trimming of the domain suffixes. - qName := dns.CanonicalName(req.Question[0].Name) - if maxRecursionLevel < maxRecursionLevelDefault { - // Get the QName without the domain suffix - qName = r.trimDomain(qName) - } - - reqType := parseRequestType(req) - results, query, err := r.getQueryResults(req, reqCtx, reqType, qName, remoteAddress) - - // in case of the wrapped ECSNotGlobalError, extract the error from it. - isECSGlobal := !errors.Is(err, discovery.ErrECSNotGlobal) - err = getErrorFromECSNotGlobalError(err) - if err != nil { - return r.generateResponseFromError(req, err, qName, configCtx, responseDomain, - isECSGlobal, query, canRecurse(configCtx)) - } - - r.logger.Trace("serializing results", "question", req.Question[0].Name, "results-found", len(results)) - - // This needs the question information because it affects the serialization format. - // e.g., the Consul service has the same "results" for both NS and A/AAAA queries, but the serialization differs. - resp, err := r.serializeQueryResults(req, reqCtx, query, results, configCtx, responseDomain, remoteAddress, maxRecursionLevel) - if err != nil { - r.logger.Error("error serializing DNS results", "error", err) - return r.generateResponseFromError(req, err, qName, configCtx, responseDomain, - false, query, false) - } - - // Switch to TCP if the client is - network := "udp" - if _, ok := remoteAddress.(*net.TCPAddr); ok { - network = "tcp" - } - - trimDNSResponse(configCtx, network, req, resp, r.logger) - - setEDNS(req, resp, isECSGlobal) - return resp -} - -// generateResponseFromError generates a response from an error. 
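handleRequestRecursively first decides whether the question belongs to the Consul domain at all; anything else is either forwarded to the recursors or refused. The real check, parseDomain, walks label boundaries with dns.NextLabel, so the following is only a simplified suffix-matching sketch of that decision.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// servedLocally reports whether a question falls under the agent's domain,
// its alternative domain, or the PTR (.arpa) space.
func servedLocally(qName, domain, altDomain string) bool {
	name := dns.CanonicalName(qName)
	if strings.HasSuffix(name, domain) || strings.HasSuffix(name, "arpa.") {
		return true
	}
	return altDomain != "" && strings.HasSuffix(name, altDomain)
}

func main() {
	fmt.Println(servedLocally("web.service.consul.", "consul.", "")) // true: answered locally
	fmt.Println(servedLocally("example.com.", "consul.", ""))        // false: recurse or refuse
}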
-func (r *Router) generateResponseFromError(req *dns.Msg, err error, qName string, - configCtx *RouterDynamicConfig, responseDomain string, isECSGlobal bool, - query *discovery.Query, canRecurse bool) *dns.Msg { - switch { - case errors.Is(err, errInvalidQuestion): - r.logger.Error("invalid question", "name", qName) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNameError, isECSGlobal) - case errors.Is(err, errNameNotFound): - r.logger.Error("name not found", "name", qName) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNameError, isECSGlobal) - case errors.Is(err, errNotImplemented): - r.logger.Error("query not implemented", "name", qName, "type", dns.Type(req.Question[0].Qtype).String()) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNotImplemented, isECSGlobal) - case errors.Is(err, discovery.ErrNotSupported): - r.logger.Debug("query name syntax not supported", "name", req.Question[0].Name) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNameError, isECSGlobal) - case errors.Is(err, discovery.ErrNotFound): - r.logger.Debug("query name not found", "name", req.Question[0].Name) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNameError, isECSGlobal) - case errors.Is(err, discovery.ErrNoData): - r.logger.Debug("no data available", "name", qName) - - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeSuccess, isECSGlobal) - case errors.Is(err, discovery.ErrNoPathToDatacenter): - dc := "" - if query != nil { - dc = query.QueryPayload.Tenancy.Datacenter - } - r.logger.Debug("no path to datacenter", "datacenter", dc) - return createAuthoritativeResponse(req, configCtx, responseDomain, dns.RcodeNameError, isECSGlobal) - } - r.logger.Error("error processing discovery query", "error", err) - return createServerFailureResponse(req, configCtx, canRecurse) -} - -// trimDomain trims the domain from the question name. -func (r *Router) trimDomain(questionName string) string { - longer := r.domain - shorter := r.altDomain - - if len(shorter) > len(longer) { - longer, shorter = shorter, longer - } - - if strings.HasSuffix(questionName, "."+strings.TrimLeft(longer, ".")) { - return strings.TrimSuffix(questionName, longer) - } - return strings.TrimSuffix(questionName, shorter) -} - -// getTTLForResult returns the TTL for a given result. -func getTTLForResult(name string, overrideTTL *uint32, query *discovery.Query, cfg *RouterDynamicConfig) uint32 { - // In the case we are not making a discovery query, such as addr. or arpa. lookups, - // use the node TTL by convention - if query == nil { - return uint32(cfg.NodeTTL / time.Second) - } - - if overrideTTL != nil { - // If a result was provided with an override, use that. This is the case for some prepared queries. - return *overrideTTL - } - - switch query.QueryType { - case discovery.QueryTypeService, discovery.QueryTypePreparedQuery: - ttl, ok := cfg.getTTLForService(name) - if ok { - return uint32(ttl / time.Second) - } - fallthrough - default: - return uint32(cfg.NodeTTL / time.Second) - } -} - -// getQueryResults returns a discovery.Result from a DNS message. 
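generateResponseFromError boils the discovery errors down to a handful of response codes. A condensed sketch of that mapping follows, using local stand-ins for the sentinel errors; the switch above is the authoritative list.

package main

import (
	"errors"
	"fmt"

	"github.com/miekg/dns"
)

// Local stand-ins for the sentinel errors used by the removed router.
var (
	errNameNotFound   = errors.New("name not found")
	errNotImplemented = errors.New("not implemented")
	errNoData         = errors.New("no data")
)

// rcodeForError condenses the mapping: most lookup failures become NXDOMAIN,
// "no data" stays NOERROR (with just an SOA), anything unexpected is SERVFAIL.
func rcodeForError(err error) int {
	switch {
	case errors.Is(err, errNameNotFound):
		return dns.RcodeNameError
	case errors.Is(err, errNotImplemented):
		return dns.RcodeNotImplemented
	case errors.Is(err, errNoData):
		return dns.RcodeSuccess
	default:
		return dns.RcodeServerFailure
	}
}

func main() {
	fmt.Println(dns.RcodeToString[rcodeForError(errNameNotFound)])    // NXDOMAIN
	fmt.Println(dns.RcodeToString[rcodeForError(errors.New("boom"))]) // SERVFAIL
}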
-func (r *Router) getQueryResults(req *dns.Msg, reqCtx Context, reqType requestType, - qName string, remoteAddress net.Addr) ([]*discovery.Result, *discovery.Query, error) { - switch reqType { - case requestTypeConsul: - // This is a special case of discovery.QueryByName where we know that we need to query the consul service - // regardless of the question name. - query := &discovery.Query{ - QueryType: discovery.QueryTypeService, - QueryPayload: discovery.QueryPayload{ - Name: structs.ConsulServiceName, - Tenancy: discovery.QueryTenancy{ - // We specify the partition here so that in the case we are a client agent in a non-default partition. - // We don't want the query processors default partition to be used. - // This is a small hack because for V1 CE, this is not the correct default partition name, but we - // need to add something to disambiguate the empty field. - Partition: acl.DefaultPartitionName, //NOTE: note this won't work if we ever have V2 client agents - }, - Limit: 3, - }, - } - - results, err := r.processor.QueryByName(query, discovery.Context{Token: reqCtx.Token}) - return results, query, err - case requestTypeName: - query, err := buildQueryFromDNSMessage(req, reqCtx, r.domain, r.altDomain, remoteAddress) - if err != nil { - r.logger.Error("error building discovery query from DNS request", "error", err) - return nil, query, err - } - results, err := r.processor.QueryByName(query, discovery.Context{Token: reqCtx.Token}) - - if getErrorFromECSNotGlobalError(err) != nil { - r.logger.Error("error processing discovery query", "error", err) - return nil, query, err - } - return results, query, err - case requestTypeIP: - ip := dnsutil.IPFromARPA(qName) - if ip == nil { - r.logger.Error("error building IP from DNS request", "name", qName) - return nil, nil, errNameNotFound - } - results, err := r.processor.QueryByIP(ip, discovery.Context{Token: reqCtx.Token}) - return results, nil, err - case requestTypeAddress: - results, err := buildAddressResults(req) - if err != nil { - r.logger.Error("error processing discovery query", "error", err) - return nil, nil, err - } - return results, nil, nil - } - - r.logger.Error("error parsing discovery query type", "requestType", reqType) - return nil, nil, errInvalidQuestion -} - -// ServeDNS implements the miekg/dns.Handler interface. -// This is a standard DNS listener, so we inject a default request context based on the agent's config. -func (r *Router) ServeDNS(w dns.ResponseWriter, req *dns.Msg) { - reqCtx := r.defaultAgentDNSRequestContext() - out := r.HandleRequest(req, reqCtx, w.RemoteAddr()) - w.WriteMsg(out) -} - -// ReloadConfig hot-reloads the router config with new parameters -func (r *Router) ReloadConfig(newCfg *config.RuntimeConfig) error { - cfg, err := getDynamicRouterConfig(newCfg) - if err != nil { - return fmt.Errorf("error loading DNS config: %w", err) - } - r.dynamicConfig.Store(cfg) - return nil -} - -// getTTLForService Find the TTL for a given service. -// return ttl, true if found, 0, false otherwise -func (cfg *RouterDynamicConfig) getTTLForService(service string) (time.Duration, bool) { - if cfg.TTLStrict != nil { - ttl, ok := cfg.TTLStrict[service] - if ok { - return ttl, true - } - } - if cfg.TTLRadix != nil { - _, ttlRaw, ok := cfg.TTLRadix.LongestPrefix(service) - if ok { - return ttlRaw.(time.Duration), true - } - } - return 0, false -} - -// Request type is similar to miekg/dns.Type, but correlates to the different query processors we might need to invoke. 
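The Router keeps its reloadable settings in an atomic.Value: ReloadConfig stores a fresh *RouterDynamicConfig and each request Loads a snapshot. A minimal sketch of that hot-reload pattern with a trimmed, made-up config type:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// routerConfig is a trimmed stand-in for RouterDynamicConfig.
type routerConfig struct {
	NodeTTL   time.Duration
	Recursors []string
}

// configHolder shows the pattern: readers load a snapshot per request,
// a reload stores a whole new value.
type configHolder struct {
	v atomic.Value // always holds *routerConfig
}

func (h *configHolder) load() *routerConfig   { return h.v.Load().(*routerConfig) }
func (h *configHolder) store(c *routerConfig) { h.v.Store(c) }

func main() {
	h := &configHolder{}
	h.store(&routerConfig{NodeTTL: 30 * time.Second})
	fmt.Println(h.load().NodeTTL)

	// A reload swaps the whole snapshot; in-flight requests keep the old one.
	h.store(&routerConfig{NodeTTL: 10 * time.Second, Recursors: []string{"10.0.0.53:53"}})
	fmt.Println(h.load().NodeTTL, h.load().Recursors)
}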
-type requestType string - -const ( - requestTypeName requestType = "NAME" // A/AAAA/CNAME/SRV - requestTypeIP requestType = "IP" // PTR - requestTypeAddress requestType = "ADDR" // Custom addr. A/AAAA lookups - requestTypeConsul requestType = "CONSUL" // SOA/NS -) - -// parseDomain converts a DNS message into a generic discovery request. -// If the request domain does not match "consul." or the alternative domain, -// it will return true for needRecurse. The logic is based on miekg/dns.ServeDNS matcher. -// The implementation assumes that the only valid domains are "consul." and the alternative domain, and -// that DS query types are not supported. -func (r *Router) parseDomain(questionName string) (string, bool) { - target := dns.CanonicalName(questionName) - target, _ = stripSuffix(target) - - for offset, overflow := 0, false; !overflow; offset, overflow = dns.NextLabel(target, offset) { - subdomain := target[offset:] - switch subdomain { - case ".": - // We don't support consul having a domain or altdomain attached to the root. - return "", true - case r.domain: - return r.domain, false - case r.altDomain: - return r.altDomain, false - case arpaDomain: - // PTR queries always respond with the primary domain. - return r.domain, false - // Default: fallthrough - } - } - // No match found; recurse if possible - return "", true -} - -// parseRequestType inspects the DNS message type and question name to determine the requestType of request. -// We assume by the time this is called, we are responding to a question with a domain we serve. -// This is used internally to determine which query processor method (if any) to invoke. -func parseRequestType(req *dns.Msg) requestType { - switch { - case req.Question[0].Qtype == dns.TypeSOA || req.Question[0].Qtype == dns.TypeNS: - // SOA and NS type supersede the domain - // NOTE!: In V1 of the DNS server it was possible to serve a PTR lookup using the arpa domain but a SOA question type. - // This also included the SOA record. This seemed inconsistent and unnecessary - it was removed for simplicity. - return requestTypeConsul - case isPTRSubdomain(req.Question[0].Name): - return requestTypeIP - case isAddrSubdomain(req.Question[0].Name): - return requestTypeAddress - default: - return requestTypeName - } -} - -func getPortsFromResult(result *discovery.Result) []discovery.Port { - if len(result.Ports) > 0 { - return result.Ports - } - // return one record. - return []discovery.Port{{}} -} - -// serializeQueryResults converts a discovery.Result into a DNS message. -func (r *Router) serializeQueryResults(req *dns.Msg, reqCtx Context, - query *discovery.Query, results []*discovery.Result, cfg *RouterDynamicConfig, - responseDomain string, remoteAddress net.Addr, maxRecursionLevel int) (*dns.Msg, error) { - resp := new(dns.Msg) - resp.SetReply(req) - resp.Compress = !cfg.DisableCompression - resp.Authoritative = true - resp.RecursionAvailable = canRecurse(cfg) - - qType := req.Question[0].Qtype - reqType := parseRequestType(req) - - // Always add the SOA record if requested. - switch { - case qType == dns.TypeSOA: - resp.Answer = append(resp.Answer, makeSOARecord(responseDomain, cfg)) - for _, result := range results { - for _, port := range getPortsFromResult(result) { - ans, ex, ns := r.getAnswerExtraAndNs(result, port, req, reqCtx, query, cfg, responseDomain, remoteAddress, maxRecursionLevel) - resp.Answer = append(resp.Answer, ans...) - resp.Extra = append(resp.Extra, ex...) - resp.Ns = append(resp.Ns, ns...) 
- } - } - case reqType == requestTypeAddress: - for _, result := range results { - for _, port := range getPortsFromResult(result) { - ans, ex, ns := r.getAnswerExtraAndNs(result, port, req, reqCtx, query, cfg, responseDomain, remoteAddress, maxRecursionLevel) - resp.Answer = append(resp.Answer, ans...) - resp.Extra = append(resp.Extra, ex...) - resp.Ns = append(resp.Ns, ns...) - } - } - case qType == dns.TypeSRV: - handled := make(map[string]struct{}) - for _, result := range results { - for _, port := range getPortsFromResult(result) { - - // Avoid duplicate entries, possible if a node has - // the same service the same port, etc. - - // The datacenter should be empty during translation if it is a peering lookup. - // This should be fine because we should always prefer the WAN address. - - address := "" - if result.Service != nil { - address = result.Service.Address - } else { - address = result.Node.Address - } - tuple := fmt.Sprintf("%s:%s:%d", result.Node.Name, address, port.Number) - if _, ok := handled[tuple]; ok { - continue - } - handled[tuple] = struct{}{} - - ans, ex, ns := r.getAnswerExtraAndNs(result, port, req, reqCtx, query, cfg, responseDomain, remoteAddress, maxRecursionLevel) - resp.Answer = append(resp.Answer, ans...) - resp.Extra = append(resp.Extra, ex...) - resp.Ns = append(resp.Ns, ns...) - } - } - default: - // default will send it to where it does some de-duping while it calls getAnswerExtraAndNs and recurses. - r.appendResultsToDNSResponse(req, reqCtx, query, resp, results, cfg, responseDomain, remoteAddress, maxRecursionLevel) - } - - if query != nil && query.QueryType != discovery.QueryTypeVirtual && - len(resp.Answer) == 0 && len(resp.Extra) == 0 { - return nil, discovery.ErrNoData - } - - return resp, nil -} - -// getServiceAddressMapFromLocationMap converts a map of Location to a map of ServiceAddress. -func getServiceAddressMapFromLocationMap(taggedAddresses map[string]*discovery.TaggedAddress) map[string]structs.ServiceAddress { - taggedServiceAddresses := make(map[string]structs.ServiceAddress, len(taggedAddresses)) - for k, v := range taggedAddresses { - taggedServiceAddresses[k] = structs.ServiceAddress{ - Address: v.Address, - Port: int(v.Port.Number), - } - } - return taggedServiceAddresses -} - -// getStringAddressMapFromTaggedAddressMap converts a map of Location to a map of string. -func getStringAddressMapFromTaggedAddressMap(taggedAddresses map[string]*discovery.TaggedAddress) map[string]string { - taggedServiceAddresses := make(map[string]string, len(taggedAddresses)) - for k, v := range taggedAddresses { - taggedServiceAddresses[k] = v.Address - } - return taggedServiceAddresses -} - -// appendResultsToDNSResponse builds dns message from the discovery results and -// appends them to the dns response. -func (r *Router) appendResultsToDNSResponse(req *dns.Msg, reqCtx Context, - query *discovery.Query, resp *dns.Msg, results []*discovery.Result, cfg *RouterDynamicConfig, - responseDomain string, remoteAddress net.Addr, maxRecursionLevel int) { - - // Always add the SOA record if requested. 
- if req.Question[0].Qtype == dns.TypeSOA { - resp.Answer = append(resp.Answer, makeSOARecord(responseDomain, cfg)) - } - - handled := make(map[string]struct{}) - var answerCNAME []dns.RR = nil - - count := 0 - for _, result := range results { - for _, port := range getPortsFromResult(result) { - - // Add the node record - had_answer := false - ans, extra, _ := r.getAnswerExtraAndNs(result, port, req, reqCtx, query, cfg, responseDomain, remoteAddress, maxRecursionLevel) - resp.Extra = append(resp.Extra, extra...) - - if len(ans) == 0 { - continue - } - - // Avoid duplicate entries, possible if a node has - // the same service on multiple ports, etc. - if _, ok := handled[ans[0].String()]; ok { - continue - } - handled[ans[0].String()] = struct{}{} - - switch ans[0].(type) { - case *dns.CNAME: - // keep track of the first CNAME + associated RRs but don't add to the resp.Answer yet - // this will only be added if no non-CNAME RRs are found - if len(answerCNAME) == 0 { - answerCNAME = ans - } - default: - resp.Answer = append(resp.Answer, ans...) - had_answer = true - } - - if had_answer { - count++ - if count == cfg.ARecordLimit { - // We stop only if greater than 0 or we reached the limit - return - } - } - } - } - if len(resp.Answer) == 0 && len(answerCNAME) > 0 { - resp.Answer = answerCNAME - } -} - -// defaultAgentDNSRequestContext returns a default request context based on the agent's config. -func (r *Router) defaultAgentDNSRequestContext() Context { - return Context{ - Token: r.tokenFunc(), - DefaultDatacenter: r.datacenter, - // We don't need to specify the agent's partition here because that will be handled further down the stack - // in the query processor. - } -} - -// resolveCNAME is used to recursively resolve CNAME records -func (r *Router) resolveCNAME(cfgContext *RouterDynamicConfig, name string, reqCtx Context, - remoteAddress net.Addr, maxRecursionLevel int) []dns.RR { - // If the CNAME record points to a Consul address, resolve it internally - // Convert query to lowercase because DNS is case-insensitive; r.domain and - // r.altDomain are already converted - - if ln := strings.ToLower(name); strings.HasSuffix(ln, "."+r.domain) || strings.HasSuffix(ln, "."+r.altDomain) { - if maxRecursionLevel < 1 { - r.logger.Error("Infinite recursion detected for name, won't perform any CNAME resolution.", "name", name) - return nil - } - req := &dns.Msg{} - - req.SetQuestion(name, dns.TypeANY) - // TODO: handle error response (this is a comment from the V1 DNS Server) - resp := r.handleRequestRecursively(req, reqCtx, cfgContext, nil, maxRecursionLevel-1) - - return resp.Answer - } - - // Do nothing if we don't have a recursor - if !canRecurse(cfgContext) { - return nil - } - - // Ask for any A records - m := new(dns.Msg) - m.SetQuestion(name, dns.TypeA) - - // Make a DNS lookup request - recursorResponse, err := r.recursor.handle(m, cfgContext, remoteAddress) - if err == nil { - return recursorResponse.Answer - } - - r.logger.Error("all resolvers failed for name", "name", name) - return nil -} - -// validateAndNormalizeRequest validates the DNS request and normalizes the request name. -func validateAndNormalizeRequest(req *dns.Msg) error { - // like upstream miekg/dns, we require at least one question, - // but we will only answer the first. - if len(req.Question) == 0 { - return errInvalidQuestion - } - - // We mutate the request name to respond with the canonical name. - // This is Consul convention. 
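appendResultsToDNSResponse prefers non-CNAME answers, honors ARecordLimit, and only falls back to the first CNAME group when nothing else answered. A compact sketch of that selection rule over prebuilt record sets; as in the loop above, a limit of 0 behaves as unlimited.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// selectAnswers keeps non-CNAME answers up to the limit and returns the first
// CNAME group only when no other answer was produced.
func selectAnswers(candidates [][]dns.RR, limit int) []dns.RR {
	var answers []dns.RR
	var firstCNAME []dns.RR
	count := 0
	for _, ans := range candidates {
		if len(ans) == 0 {
			continue
		}
		if _, ok := ans[0].(*dns.CNAME); ok {
			if firstCNAME == nil {
				firstCNAME = ans
			}
			continue
		}
		answers = append(answers, ans...)
		count++
		if count == limit {
			break
		}
	}
	if len(answers) == 0 {
		return firstCNAME
	}
	return answers
}

func main() {
	a, _ := dns.NewRR("web.service.consul. 10 IN A 10.0.0.1")
	c, _ := dns.NewRR("web.service.consul. 10 IN CNAME web.example.com.")
	fmt.Println(len(selectAnswers([][]dns.RR{{c}, {a}}, 3))) // 1: the A record wins
	fmt.Println(len(selectAnswers([][]dns.RR{{c}}, 3)))      // 1: CNAME used as fallback
}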
- req.Question[0].Name = dns.CanonicalName(req.Question[0].Name) - return nil -} - -// stripSuffix strips off the suffixes that may have been added to the request name. -func stripSuffix(target string) (string, bool) { - enableFailover := false - - // Strip off any suffixes that may have been added. - offset, underflow := dns.PrevLabel(target, 1) - if !underflow { - maybeSuffix := target[offset:] - switch maybeSuffix { - case suffixFailover: - target = target[:offset] - enableFailover = true - case suffixNoFailover: - target = target[:offset] - } - } - return target, enableFailover -} - -// isAddrSubdomain returns true if the domain is a valid addr subdomain. -func isAddrSubdomain(domain string) bool { - labels := dns.SplitDomainName(domain) - - // Looking for .addr..consul. - if len(labels) > 2 { - return labels[1] == addrLabel - } - return false -} - -// isPTRSubdomain returns true if the domain ends in the PTR domain, "in-addr.arpa.". -func isPTRSubdomain(domain string) bool { - labels := dns.SplitDomainName(domain) - labelCount := len(labels) - - // We keep this check brief so we can have more specific error handling later. - if labelCount < 1 { - return false - } - - return labels[labelCount-1] == arpaLabel -} - -// getDynamicRouterConfig takes agent config and creates/resets the config used by DNS Router -func getDynamicRouterConfig(conf *config.RuntimeConfig) (*RouterDynamicConfig, error) { - cfg := &RouterDynamicConfig{ - ARecordLimit: conf.DNSARecordLimit, - EnableTruncate: conf.DNSEnableTruncate, - NodeTTL: conf.DNSNodeTTL, - RecursorStrategy: conf.DNSRecursorStrategy, - RecursorTimeout: conf.DNSRecursorTimeout, - UDPAnswerLimit: conf.DNSUDPAnswerLimit, - NodeMetaTXT: conf.DNSNodeMetaTXT, - DisableCompression: conf.DNSDisableCompression, - SOAConfig: SOAConfig{ - Expire: conf.DNSSOA.Expire, - Minttl: conf.DNSSOA.Minttl, - Refresh: conf.DNSSOA.Refresh, - Retry: conf.DNSSOA.Retry, - }, - } - - if conf.DNSServiceTTL != nil { - cfg.TTLRadix = radix.New() - cfg.TTLStrict = make(map[string]time.Duration) - - for key, ttl := range conf.DNSServiceTTL { - // All suffix with '*' are put in radix - // This include '*' that will match anything - if strings.HasSuffix(key, "*") { - cfg.TTLRadix.Insert(key[:len(key)-1], ttl) - } else { - cfg.TTLStrict[key] = ttl - } - } - } else { - cfg.TTLRadix = nil - cfg.TTLStrict = nil - } - - for _, r := range conf.DNSRecursors { - ra, err := formatRecursorAddress(r) - if err != nil { - return nil, fmt.Errorf("invalid recursor address: %w", err) - } - cfg.Recursors = append(cfg.Recursors, ra) - } - - return cfg, nil -} - -// canRecurse returns true if the router can recurse on the request. -func canRecurse(cfg *RouterDynamicConfig) bool { - return len(cfg.Recursors) > 0 -} - -// createServerFailureResponse returns a SERVFAIL message. 
-func createServerFailureResponse(req *dns.Msg, cfg *RouterDynamicConfig, recursionAvailable bool) *dns.Msg { - // Return a SERVFAIL message - m := &dns.Msg{} - m.SetReply(req) - m.Compress = !cfg.DisableCompression - m.SetRcode(req, dns.RcodeServerFailure) - m.RecursionAvailable = recursionAvailable - if edns := req.IsEdns0(); edns != nil { - setEDNS(req, m, true) - } - - return m -} - -// setEDNS is used to set the responses EDNS size headers and -// possibly the ECS headers as well if they were present in the -// original request -func setEDNS(request *dns.Msg, response *dns.Msg, ecsGlobal bool) { - edns := request.IsEdns0() - if edns == nil { - return - } - - // cannot just use the SetEdns0 function as we need to embed - // the ECS option as well - ednsResp := new(dns.OPT) - ednsResp.Hdr.Name = "." - ednsResp.Hdr.Rrtype = dns.TypeOPT - ednsResp.SetUDPSize(edns.UDPSize()) - - // Set up the ECS option if present - if subnet := ednsSubnetForRequest(request); subnet != nil { - subOp := new(dns.EDNS0_SUBNET) - subOp.Code = dns.EDNS0SUBNET - subOp.Family = subnet.Family - subOp.Address = subnet.Address - subOp.SourceNetmask = subnet.SourceNetmask - if c := response.Rcode; ecsGlobal || c == dns.RcodeNameError || c == dns.RcodeServerFailure || c == dns.RcodeRefused || c == dns.RcodeNotImplemented { - // reply is globally valid and should be cached accordingly - subOp.SourceScope = 0 - } else { - // reply is only valid for the subnet it was queried with - subOp.SourceScope = subnet.SourceNetmask - } - ednsResp.Option = append(ednsResp.Option, subOp) - } - - response.Extra = append(response.Extra, ednsResp) -} - -// ednsSubnetForRequest looks through the request to find any EDS subnet options -func ednsSubnetForRequest(req *dns.Msg) *dns.EDNS0_SUBNET { - // IsEdns0 returns the EDNS RR if present or nil otherwise - edns := req.IsEdns0() - if edns == nil { - return nil - } - - for _, o := range edns.Option { - if subnet, ok := o.(*dns.EDNS0_SUBNET); ok { - return subnet - } - } - return nil -} - -// createRefusedResponse returns a REFUSED message. This is the default behavior for unmatched queries in -// upstream miekg/dns. -func createRefusedResponse(req *dns.Msg) *dns.Msg { - // Return a REFUSED message - m := &dns.Msg{} - m.SetRcode(req, dns.RcodeRefused) - return m -} - -// createAuthoritativeResponse returns an authoritative message that contains the SOA in the event that data is -// not return for a query. There can be multiple reasons for not returning data, hence the rcode argument. -func createAuthoritativeResponse(req *dns.Msg, cfg *RouterDynamicConfig, domain string, rcode int, ecsGlobal bool) *dns.Msg { - m := &dns.Msg{} - m.SetRcode(req, rcode) - m.Compress = !cfg.DisableCompression - m.Authoritative = true - m.RecursionAvailable = canRecurse(cfg) - if edns := req.IsEdns0(); edns != nil { - setEDNS(req, m, ecsGlobal) - } - - // We add the SOA on NameErrors - soa := makeSOARecord(domain, cfg) - m.Ns = append(m.Ns, soa) - - return m -} - -// buildAddressResults returns a discovery.Result from a DNS request for addr. records. 
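// Illustrative sketch only (not part of this patch): the setEDNS and
// ednsSubnetForRequest helpers above read back exactly the kind of OPT
// record a client builds when it advertises a UDP buffer size and an EDNS
// Client Subnet (ECS) option. The query name and subnet below are made-up
// example values, using only miekg/dns APIs that already appear in this file.
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	// A query that advertises a 1232-byte UDP buffer and carries an ECS
	// option for 192.0.2.0/24.
	req := new(dns.Msg)
	req.SetQuestion("db.service.consul.", dns.TypeA)

	opt := new(dns.OPT)
	opt.Hdr.Name = "."
	opt.Hdr.Rrtype = dns.TypeOPT
	opt.SetUDPSize(1232)

	subnet := new(dns.EDNS0_SUBNET)
	subnet.Code = dns.EDNS0SUBNET
	subnet.Family = 1 // IPv4
	subnet.SourceNetmask = 24
	subnet.SourceScope = 0
	subnet.Address = net.ParseIP("192.0.2.0").To4()
	opt.Option = append(opt.Option, subnet)
	req.Extra = append(req.Extra, opt)

	// IsEdns0 returns the OPT record, which is all ednsSubnetForRequest needs.
	if edns := req.IsEdns0(); edns != nil {
		fmt.Println("advertised UDP size:", edns.UDPSize())
	}
}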
-func buildAddressResults(req *dns.Msg) ([]*discovery.Result, error) { - domain := dns.CanonicalName(req.Question[0].Name) - labels := dns.SplitDomainName(domain) - hexadecimal := labels[0] - - if len(hexadecimal)/2 != 4 && len(hexadecimal)/2 != 16 { - return nil, errNameNotFound - } - - var ip net.IP - ip, err := hex.DecodeString(hexadecimal) - if err != nil { - return nil, errNameNotFound - } - - return []*discovery.Result{ - { - Node: &discovery.Location{ - Address: ip.String(), - }, - Type: discovery.ResultTypeNode, // We choose node by convention since we do not know the origin of the IP - }, - }, nil -} - -// getAnswerAndExtra creates the dns answer and extra from discovery results. -func (r *Router) getAnswerExtraAndNs(result *discovery.Result, port discovery.Port, req *dns.Msg, reqCtx Context, - query *discovery.Query, cfg *RouterDynamicConfig, domain string, remoteAddress net.Addr, - maxRecursionLevel int) (answer []dns.RR, extra []dns.RR, ns []dns.RR) { - serviceAddress, nodeAddress := r.getServiceAndNodeAddresses(result, req) - qName := req.Question[0].Name - ttlLookupName := qName - if query != nil { - ttlLookupName = query.QueryPayload.Name - } - - ttl := getTTLForResult(ttlLookupName, result.DNS.TTL, query, cfg) - - qType := req.Question[0].Qtype - - // TODO (v2-dns): skip records that refer to a workload/node that don't have a valid DNS name. - - // Special case responses - switch { - // PTR requests are first since they are a special case of domain overriding question type - case parseRequestType(req) == requestTypeIP: - ptrTarget := "" - if result.Type == discovery.ResultTypeNode { - ptrTarget = result.Node.Name - } else if result.Type == discovery.ResultTypeService { - ptrTarget = result.Service.Name - } - - ptr := &dns.PTR{ - Hdr: dns.RR_Header{Name: qName, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0}, - Ptr: canonicalNameForResult(result.Type, ptrTarget, domain, result.Tenancy, port.Name), - } - answer = append(answer, ptr) - case qType == dns.TypeNS: - resultType := result.Type - target := result.Node.Name - if parseRequestType(req) == requestTypeConsul && resultType == discovery.ResultTypeService { - resultType = discovery.ResultTypeNode - } - fqdn := canonicalNameForResult(resultType, target, domain, result.Tenancy, port.Name) - extraRecord := makeIPBasedRecord(fqdn, nodeAddress, ttl) - - answer = append(answer, makeNSRecord(domain, fqdn, ttl)) - extra = append(extra, extraRecord) - case qType == dns.TypeSOA: - // to be returned in the result. - fqdn := canonicalNameForResult(result.Type, result.Node.Name, domain, result.Tenancy, port.Name) - extraRecord := makeIPBasedRecord(fqdn, nodeAddress, ttl) - - ns = append(ns, makeNSRecord(domain, fqdn, ttl)) - extra = append(extra, extraRecord) - case qType == dns.TypeSRV: - // We put A/AAAA/CNAME records in the additional section for SRV requests - a, e := r.getAnswerExtrasForAddressAndTarget(nodeAddress, serviceAddress, req, reqCtx, - result, port, ttl, remoteAddress, cfg, domain, maxRecursionLevel) - answer = append(answer, a...) - extra = append(extra, e...) - - default: - a, e := r.getAnswerExtrasForAddressAndTarget(nodeAddress, serviceAddress, req, reqCtx, - result, port, ttl, remoteAddress, cfg, domain, maxRecursionLevel) - answer = append(answer, a...) - extra = append(extra, e...) - } - - a, e := getAnswerAndExtraTXT(req, cfg, qName, result, ttl, domain, query, &port) - answer = append(answer, a...) - extra = append(extra, e...) 
- return -} - -// getServiceAndNodeAddresses returns the service and node addresses from a discovery result. -func (r *Router) getServiceAndNodeAddresses(result *discovery.Result, req *dns.Msg) (*dnsAddress, *dnsAddress) { - addrTranslate := dnsutil.TranslateAddressAcceptDomain - if req.Question[0].Qtype == dns.TypeA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv4 - } else if req.Question[0].Qtype == dns.TypeAAAA { - addrTranslate |= dnsutil.TranslateAddressAcceptIPv6 - } else { - addrTranslate |= dnsutil.TranslateAddressAcceptAny - } - - // The datacenter should be empty during translation if it is a peering lookup. - // This should be fine because we should always prefer the WAN address. - serviceAddress := newDNSAddress("") - if result.Service != nil { - sa := r.translateServiceAddressFunc(result.Tenancy.Datacenter, - result.Service.Address, getServiceAddressMapFromLocationMap(result.Service.TaggedAddresses), - addrTranslate) - serviceAddress = newDNSAddress(sa) - } - nodeAddress := newDNSAddress("") - if result.Node != nil { - na := r.translateAddressFunc(result.Tenancy.Datacenter, result.Node.Address, - getStringAddressMapFromTaggedAddressMap(result.Node.TaggedAddresses), addrTranslate) - nodeAddress = newDNSAddress(na) - } - return serviceAddress, nodeAddress -} - -// getAnswerExtrasForAddressAndTarget creates the dns answer and extra from nodeAddress and serviceAddress dnsAddress pairs. -func (r *Router) getAnswerExtrasForAddressAndTarget(nodeAddress *dnsAddress, serviceAddress *dnsAddress, req *dns.Msg, - reqCtx Context, result *discovery.Result, port discovery.Port, ttl uint32, remoteAddress net.Addr, - cfg *RouterDynamicConfig, domain string, maxRecursionLevel int) (answer []dns.RR, extra []dns.RR) { - qName := req.Question[0].Name - reqType := parseRequestType(req) - - switch { - case (reqType == requestTypeAddress || result.Type == discovery.ResultTypeVirtual) && - serviceAddress.IsEmptyString() && nodeAddress.IsIP(): - a, e := getAnswerExtrasForIP(qName, nodeAddress, req.Question[0], reqType, result, ttl, domain, &port) - answer = append(answer, a...) - extra = append(extra, e...) - - case result.Type == discovery.ResultTypeNode && nodeAddress.IsIP(): - canonicalNodeName := canonicalNameForResult(result.Type, result.Node.Name, domain, result.Tenancy, port.Name) - a, e := getAnswerExtrasForIP(canonicalNodeName, nodeAddress, req.Question[0], reqType, - result, ttl, domain, &port) - answer = append(answer, a...) - extra = append(extra, e...) - - case result.Type == discovery.ResultTypeNode && !nodeAddress.IsIP(): - a, e := r.makeRecordFromFQDN(result, req, reqCtx, cfg, - ttl, remoteAddress, maxRecursionLevel, serviceAddress.FQDN(), &port) - answer = append(answer, a...) - extra = append(extra, e...) - - case serviceAddress.IsEmptyString() && nodeAddress.IsEmptyString(): - return nil, nil - - // There is no service address and the node address is an IP - case serviceAddress.IsEmptyString() && nodeAddress.IsIP(): - resultType := discovery.ResultTypeNode - if result.Type == discovery.ResultTypeWorkload { - resultType = discovery.ResultTypeWorkload - } - canonicalNodeName := canonicalNameForResult(resultType, result.Node.Name, domain, result.Tenancy, port.Name) - a, e := getAnswerExtrasForIP(canonicalNodeName, nodeAddress, req.Question[0], reqType, result, ttl, domain, &port) - answer = append(answer, a...) - extra = append(extra, e...) 
- - // There is no service address and the node address is a FQDN (external service) - case serviceAddress.IsEmptyString(): - a, e := r.makeRecordFromFQDN(result, req, reqCtx, cfg, ttl, remoteAddress, maxRecursionLevel, nodeAddress.FQDN(), &port) - answer = append(answer, a...) - extra = append(extra, e...) - - // The service address is an IP - case serviceAddress.IsIP(): - canonicalServiceName := canonicalNameForResult(discovery.ResultTypeService, result.Service.Name, domain, result.Tenancy, port.Name) - a, e := getAnswerExtrasForIP(canonicalServiceName, serviceAddress, req.Question[0], reqType, result, ttl, domain, &port) - answer = append(answer, a...) - extra = append(extra, e...) - - // If the service address is a CNAME for the service we are looking - // for then use the node address. - case serviceAddress.FQDN() == req.Question[0].Name && nodeAddress.IsIP(): - canonicalNodeName := canonicalNameForResult(discovery.ResultTypeNode, result.Node.Name, domain, result.Tenancy, port.Name) - a, e := getAnswerExtrasForIP(canonicalNodeName, nodeAddress, req.Question[0], reqType, result, ttl, domain, &port) - answer = append(answer, a...) - extra = append(extra, e...) - - // The service address is a FQDN (internal or external service name) - default: - a, e := r.makeRecordFromFQDN(result, req, reqCtx, cfg, ttl, remoteAddress, maxRecursionLevel, serviceAddress.FQDN(), &port) - answer = append(answer, a...) - extra = append(extra, e...) - } - - return -} - -// getAnswerAndExtraTXT determines whether a TXT needs to be create and then -// returns the TXT record in the answer or extra depending on the question type. -func getAnswerAndExtraTXT(req *dns.Msg, cfg *RouterDynamicConfig, qName string, - result *discovery.Result, ttl uint32, domain string, query *discovery.Query, port *discovery.Port) (answer []dns.RR, extra []dns.RR) { - if !shouldAppendTXTRecord(query, cfg, req) { - return - } - recordHeaderName := qName - serviceAddress := newDNSAddress("") - if result.Service != nil { - serviceAddress = newDNSAddress(result.Service.Address) - } - if result.Type != discovery.ResultTypeNode && - result.Type != discovery.ResultTypeVirtual && - !serviceAddress.IsInternalFQDN(domain) && - !serviceAddress.IsExternalFQDN(domain) { - recordHeaderName = canonicalNameForResult(discovery.ResultTypeNode, result.Node.Name, - domain, result.Tenancy, port.Name) - } - qType := req.Question[0].Qtype - generateMeta := false - metaInAnswer := false - if qType == dns.TypeANY || qType == dns.TypeTXT { - generateMeta = true - metaInAnswer = true - } else if cfg.NodeMetaTXT { - generateMeta = true - } - - // Do not generate txt records if we don't have to: https://github.com/hashicorp/consul/pull/5272 - if generateMeta { - meta := makeTXTRecord(recordHeaderName, result, ttl) - if metaInAnswer { - answer = append(answer, meta...) - } else { - extra = append(extra, meta...) - } - } - return answer, extra -} - -// shouldAppendTXTRecord determines whether a TXT record should be appended to the response. 
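// Illustrative sketch only (not part of this patch): getAnswerAndExtraTXT
// above decides whether node metadata is emitted and whether it lands in the
// answer or extra section. This standalone example shows the shape of the
// resulting TXT records for made-up metadata, ignoring the RFC 1464 key
// escaping handled elsewhere in this file.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// One TXT record per metadata key, each encoded as a "key=value" string.
	meta := map[string]string{"env": "prod", "rack": "r1"}

	var rrs []dns.RR
	for k, v := range meta {
		rrs = append(rrs, &dns.TXT{
			Hdr: dns.RR_Header{
				Name:   "web.node.dc1.consul.", // hypothetical record name
				Rrtype: dns.TypeTXT,
				Class:  dns.ClassINET,
				Ttl:    10,
			},
			Txt: []string{k + "=" + v},
		})
	}

	for _, rr := range rrs {
		fmt.Println(rr.String())
	}
}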
-func shouldAppendTXTRecord(query *discovery.Query, cfg *RouterDynamicConfig, req *dns.Msg) bool { - qType := req.Question[0].Qtype - switch { - // Node records - case query != nil && query.QueryType == discovery.QueryTypeNode && (cfg.NodeMetaTXT || qType == dns.TypeANY || qType == dns.TypeTXT): - return true - // Service records - case query != nil && query.QueryType == discovery.QueryTypeService && cfg.NodeMetaTXT && qType == dns.TypeSRV: - return true - // Prepared query records - case query != nil && query.QueryType == discovery.QueryTypePreparedQuery && cfg.NodeMetaTXT && qType == dns.TypeSRV: - return true - } - return false -} - -// getAnswerExtrasForIP creates the dns answer and extra from IP dnsAddress pairs. -func getAnswerExtrasForIP(name string, addr *dnsAddress, question dns.Question, - reqType requestType, result *discovery.Result, ttl uint32, domain string, port *discovery.Port) (answer []dns.RR, extra []dns.RR) { - qType := question.Qtype - canReturnARecord := qType == dns.TypeSRV || qType == dns.TypeA || qType == dns.TypeANY || qType == dns.TypeNS || qType == dns.TypeTXT - canReturnAAAARecord := qType == dns.TypeSRV || qType == dns.TypeAAAA || qType == dns.TypeANY || qType == dns.TypeNS || qType == dns.TypeTXT - if reqType != requestTypeAddress && result.Type != discovery.ResultTypeVirtual { - switch { - // check IPV4 - case addr.IsIP() && addr.IsIPV4() && !canReturnARecord, - // check IPV6 - addr.IsIP() && !addr.IsIPV4() && !canReturnAAAARecord: - return - } - } - - // Have to pass original question name here even if the system has recursed - // and stripped off the domain suffix. - recHdrName := question.Name - if qType == dns.TypeSRV { - nameSplit := strings.Split(name, ".") - if len(nameSplit) > 1 && nameSplit[1] == addrLabel { - recHdrName = name - } else { - recHdrName = name - } - name = question.Name - } - - if reqType != requestTypeAddress && qType == dns.TypeSRV { - if result.Type == discovery.ResultTypeService && addr.IsIP() && result.Node.Address != addr.String() { - // encode the ip to be used in the header of the A/AAAA record - // as well as the target of the SRV record. - recHdrName = encodeIPAsFqdn(result, addr.IP(), domain) - } - if result.Type == discovery.ResultTypeWorkload { - recHdrName = canonicalNameForResult(result.Type, result.Node.Name, domain, result.Tenancy, port.Name) - } - srv := makeSRVRecord(name, recHdrName, result, ttl, port) - answer = append(answer, srv) - } - - record := makeIPBasedRecord(recHdrName, addr, ttl) - - isARecordWhenNotExplicitlyQueried := record.Header().Rrtype == dns.TypeA && qType != dns.TypeA && qType != dns.TypeANY - isAAAARecordWhenNotExplicitlyQueried := record.Header().Rrtype == dns.TypeAAAA && qType != dns.TypeAAAA && qType != dns.TypeANY - - // For explicit A/AAAA queries, we must only return those records in the answer section. - if isARecordWhenNotExplicitlyQueried || - isAAAARecordWhenNotExplicitlyQueried { - extra = append(extra, record) - } else { - answer = append(answer, record) - } - - return -} - -// encodeIPAsFqdn encodes an IP address as a FQDN. -func encodeIPAsFqdn(result *discovery.Result, ip net.IP, responseDomain string) string { - ipv4 := ip.To4() - ipStr := hex.EncodeToString(ip) - if ipv4 != nil { - ipStr = ipStr[len(ipStr)-(net.IPv4len*2):] - } - if result.Tenancy.PeerName != "" { - // Exclude the datacenter from the FQDN on the addr for peers. 
- // This technically makes no difference, since the addr endpoint ignores the DC - // component of the request, but do it anyway for a less confusing experience. - return fmt.Sprintf("%s.addr.%s", ipStr, responseDomain) - } - return fmt.Sprintf("%s.addr.%s.%s", ipStr, result.Tenancy.Datacenter, responseDomain) -} - -func makeSOARecord(domain string, cfg *RouterDynamicConfig) dns.RR { - return &dns.SOA{ - Hdr: dns.RR_Header{ - Name: domain, - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - // Has to be consistent with MinTTL to avoid invalidation - Ttl: cfg.SOAConfig.Minttl, - }, - Ns: "ns." + domain, - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster." + domain, - Refresh: cfg.SOAConfig.Refresh, - Retry: cfg.SOAConfig.Retry, - Expire: cfg.SOAConfig.Expire, - Minttl: cfg.SOAConfig.Minttl, - } -} - -func makeNSRecord(domain, fqdn string, ttl uint32) dns.RR { - return &dns.NS{ - Hdr: dns.RR_Header{ - Name: domain, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: ttl, - }, - Ns: fqdn, - } -} - -// makeIPBasedRecord an A or AAAA record for the given name and IP. -// Note: we might want to pass in the Query Name here, which is used in addr. and virtual. queries -// since there is only ever one result. Right now choosing to leave it off for simplification. -func makeIPBasedRecord(name string, addr *dnsAddress, ttl uint32) dns.RR { - - if addr.IsIPV4() { - // check if the query type is A for IPv4 or ANY - return &dns.A{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: ttl, - }, - A: addr.IP(), - } - } - - return &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: ttl, - }, - AAAA: addr.IP(), - } -} - -func (r *Router) makeRecordFromFQDN(result *discovery.Result, req *dns.Msg, reqCtx Context, cfg *RouterDynamicConfig, ttl uint32, remoteAddress net.Addr, maxRecursionLevel int, fqdn string, port *discovery.Port) ([]dns.RR, []dns.RR) { - edns := req.IsEdns0() != nil - q := req.Question[0] - - more := r.resolveCNAME(cfg, dns.Fqdn(fqdn), reqCtx, remoteAddress, maxRecursionLevel) - var additional []dns.RR - extra := 0 -MORE_REC: - for _, rr := range more { - switch rr.Header().Rrtype { - case dns.TypeCNAME, dns.TypeA, dns.TypeAAAA, dns.TypeTXT: - // set the TTL manually - rr.Header().Ttl = ttl - additional = append(additional, rr) - - extra++ - if extra == maxRecurseRecords && !edns { - break MORE_REC - } - } - } - - if q.Qtype == dns.TypeSRV { - answer := makeSRVRecord(q.Name, fqdn, result, ttl, port) - return []dns.RR{answer}, additional - } - - address := "" - if result.Service != nil && result.Service.Address != "" { - address = result.Service.Address - } else if result.Node != nil { - address = result.Node.Address - } - - answers := []dns.RR{ - makeCNAMERecord(q.Name, address, ttl), - } - answers = append(answers, additional...) - - return answers, nil -} - -// makeCNAMERecord returns a CNAME record for the given name and target. -func makeCNAMERecord(name string, target string, ttl uint32) *dns.CNAME { - return &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: ttl, - }, - Target: dns.Fqdn(target), - } -} - -// func makeSRVRecord returns an SRV record for the given name and target. 
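// Illustrative sketch only (not part of this patch): encodeIPAsFqdn above and
// buildAddressResults earlier in this file round-trip an IP through the hex
// label used by "addr." names (c000020a.addr.dc1.consul is 192.0.2.10). This
// standalone example shows that encoding and decoding with the standard
// library only.
package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

func main() {
	// Encode: an IPv4 address becomes its 8-character hex label.
	ip := net.ParseIP("192.0.2.10").To4()
	label := hex.EncodeToString(ip)
	fmt.Println(label) // c000020a

	// Decode: the hex label turns back into a net.IP, which is how the addr.
	// lookup recovers the address from the question name.
	raw, err := hex.DecodeString(label)
	if err != nil {
		panic(err)
	}
	fmt.Println(net.IP(raw).String()) // 192.0.2.10
}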
-func makeSRVRecord(name, target string, result *discovery.Result, ttl uint32, port *discovery.Port) *dns.SRV { - return &dns.SRV{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: ttl, - }, - Priority: 1, - Weight: uint16(result.DNS.Weight), - Port: uint16(port.Number), - Target: target, - } -} - -// encodeKVasRFC1464 encodes a key-value pair according to RFC1464 -func encodeKVasRFC1464(key, value string) (txt string) { - // For details on these replacements c.f. https://www.ietf.org/rfc/rfc1464.txt - key = strings.Replace(key, "`", "``", -1) - key = strings.Replace(key, "=", "`=", -1) - - // Backquote the leading spaces - leadingSpacesRE := regexp.MustCompile("^ +") - numLeadingSpaces := len(leadingSpacesRE.FindString(key)) - key = leadingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numLeadingSpaces)) - - // Backquote the trailing spaces - numTrailingSpaces := len(trailingSpacesRE.FindString(key)) - key = trailingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numTrailingSpaces)) - - value = strings.Replace(value, "`", "``", -1) - - return key + "=" + value -} - -// makeTXTRecord returns a TXT record for the given name and result metadata. -func makeTXTRecord(name string, result *discovery.Result, ttl uint32) []dns.RR { - extra := make([]dns.RR, 0, len(result.Metadata)) - for key, value := range result.Metadata { - txt := value - if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") { - txt = encodeKVasRFC1464(key, value) - } - - extra = append(extra, &dns.TXT{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeTXT, - Class: dns.ClassINET, - Ttl: ttl, - }, - Txt: []string{txt}, - }) - } - return extra -} - -// canonicalNameForResult returns the canonical name for a discovery result. -func canonicalNameForResult(resultType discovery.ResultType, target, domain string, - tenancy discovery.ResultTenancy, portName string) string { - switch resultType { - case discovery.ResultTypeService: - if tenancy.Namespace != "" { - return fmt.Sprintf("%s.%s.%s.%s.%s", target, "service", tenancy.Namespace, tenancy.Datacenter, domain) - } - return fmt.Sprintf("%s.%s.%s.%s", target, "service", tenancy.Datacenter, domain) - case discovery.ResultTypeNode: - if tenancy.PeerName != "" && tenancy.Partition != "" { - // We must return a more-specific DNS name for peering so - // that there is no ambiguity with lookups. - // Nodes are always registered in the default namespace, so - // the `.ns` qualifier is not required. - return fmt.Sprintf("%s.node.%s.peer.%s.ap.%s", - target, - tenancy.PeerName, - tenancy.Partition, - domain) - } - if tenancy.PeerName != "" { - // We must return a more-specific DNS name for peering so - // that there is no ambiguity with lookups. - return fmt.Sprintf("%s.node.%s.peer.%s", - target, - tenancy.PeerName, - domain) - } - // Return a simpler format for non-peering nodes. - return fmt.Sprintf("%s.node.%s.%s", target, tenancy.Datacenter, domain) - case discovery.ResultTypeWorkload: - // TODO (v2-dns): it doesn't appear this is being used to return a result. 
Need to investigate and refactor - if portName != "" { - return fmt.Sprintf("%s.port.%s.workload.%s.ns.%s.ap.%s", portName, target, tenancy.Namespace, tenancy.Partition, domain) - } - return fmt.Sprintf("%s.workload.%s.ns.%s.ap.%s", target, tenancy.Namespace, tenancy.Partition, domain) - } - return "" -} diff --git a/agent/dns/router_query.go b/agent/dns/router_query.go deleted file mode 100644 index bbcbca6698828..0000000000000 --- a/agent/dns/router_query.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "net" - "strings" - - "github.com/miekg/dns" - - "github.com/hashicorp/consul/agent/discovery" -) - -// buildQueryFromDNSMessage returns a discovery.Query from a DNS message. -func buildQueryFromDNSMessage(req *dns.Msg, reqCtx Context, domain, altDomain string, - remoteAddress net.Addr) (*discovery.Query, error) { - queryType, queryParts, querySuffixes := getQueryTypePartsAndSuffixesFromDNSMessage(req, domain, altDomain) - - queryTenancy, err := getQueryTenancy(reqCtx, queryType, querySuffixes) - if err != nil { - return nil, err - } - - name, tag := getQueryNameAndTagFromParts(queryType, queryParts) - - portName := parsePort(queryParts) - - switch { - case queryType == discovery.QueryTypeWorkload && req.Question[0].Qtype == dns.TypeSRV: - // Currently we do not support SRV records for workloads - return nil, errNotImplemented - case queryType == discovery.QueryTypeInvalid, name == "": - return nil, errInvalidQuestion - } - - return &discovery.Query{ - QueryType: queryType, - QueryPayload: discovery.QueryPayload{ - Name: name, - Tenancy: queryTenancy, - Tag: tag, - PortName: portName, - SourceIP: getSourceIP(req, queryType, remoteAddress), - }, - }, nil -} - -// getQueryNameAndTagFromParts returns the query name and tag from the query parts that are taken from the original dns question. -func getQueryNameAndTagFromParts(queryType discovery.QueryType, queryParts []string) (string, string) { - n := len(queryParts) - if n == 0 { - return "", "" - } - - switch queryType { - case discovery.QueryTypeService: - // Support RFC 2782 style syntax - if n == 2 && strings.HasPrefix(queryParts[1], "_") && strings.HasPrefix(queryParts[0], "_") { - // Grab the tag since we make nuke it if it's tcp - tag := queryParts[1][1:] - - // Treat _name._tcp.service.consul as a default, no need to filter on that tag - if tag == "tcp" { - tag = "" - } - - name := queryParts[0][1:] - // _name._tag.service.consul - return name, tag - } - return queryParts[n-1], "" - case discovery.QueryTypePreparedQuery: - name := "" - - // If the first and last DNS query parts begin with _, this is an RFC 2782 style SRV lookup. - // This allows for prepared query names to include "." (for backwards compatibility). - // Otherwise, this is a standard prepared query lookup. - if n >= 2 && strings.HasPrefix(queryParts[0], "_") && strings.HasPrefix(queryParts[n-1], "_") { - // The last DNS query part is the protocol field (ignored). - // All prior parts are the prepared query name or ID. - name = strings.Join(queryParts[:n-1], ".") - - // Strip leading underscore - name = name[1:] - } else { - // Allow a "." in the query name, just join all the parts. - name = strings.Join(queryParts, ".") - } - return name, "" - } - return queryParts[n-1], "" -} - -// getQueryTenancy returns a discovery.QueryTenancy from a DNS message. 
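// Illustrative sketch only (not part of this patch): getQueryNameAndTagFromParts
// above treats a leading pair of underscore labels as RFC 2782 syntax
// (_name._tag), with _tcp acting as "no tag". This standalone example mirrors
// that rule on already-split labels; the inputs are made-up examples.
package main

import (
	"fmt"
	"strings"
)

// parseRFC2782 returns (name, tag): "_db._primary" -> ("db", "primary"),
// "_db._tcp" -> ("db", ""), anything else -> (last label, "").
func parseRFC2782(parts []string) (string, string) {
	n := len(parts)
	if n == 0 {
		return "", ""
	}
	if n == 2 && strings.HasPrefix(parts[0], "_") && strings.HasPrefix(parts[1], "_") {
		tag := parts[1][1:]
		if tag == "tcp" {
			tag = ""
		}
		return parts[0][1:], tag
	}
	return parts[n-1], ""
}

func main() {
	fmt.Println(parseRFC2782([]string{"_db", "_primary"})) // db primary
	fmt.Println(parseRFC2782([]string{"_db", "_tcp"}))     // db
	fmt.Println(parseRFC2782([]string{"db"}))              // db
}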
-func getQueryTenancy(reqCtx Context, queryType discovery.QueryType, querySuffixes []string) (discovery.QueryTenancy, error) { - labels, ok := parseLabels(querySuffixes) - if !ok { - return discovery.QueryTenancy{}, errNameNotFound - } - - // If we don't have an explicit partition in the request, try the first fallback - // which was supplied in the request context. The agent's partition will be used as the last fallback - // later in the query processor. - if labels.Partition == "" { - labels.Partition = reqCtx.DefaultPartition - } - - // If we have a sameness group, we can return early without further data massage. - if labels.SamenessGroup != "" { - return discovery.QueryTenancy{ - Namespace: labels.Namespace, - Partition: labels.Partition, - SamenessGroup: labels.SamenessGroup, - Datacenter: reqCtx.DefaultDatacenter, - }, nil - } - - if queryType == discovery.QueryTypeVirtual { - if labels.Peer == "" { - // If the peer name was not explicitly defined, fall back to the ambiguously-parsed version. - labels.Peer = labels.PeerOrDatacenter - } - } - - return discovery.QueryTenancy{ - Namespace: labels.Namespace, - Partition: labels.Partition, - Peer: labels.Peer, - Datacenter: getEffectiveDatacenter(labels, reqCtx.DefaultDatacenter), - }, nil -} - -// getEffectiveDatacenter returns the effective datacenter from the parsed labels. -func getEffectiveDatacenter(labels *parsedLabels, defaultDC string) string { - switch { - case labels.Datacenter != "": - return labels.Datacenter - case labels.PeerOrDatacenter != "" && labels.Peer != labels.PeerOrDatacenter: - return labels.PeerOrDatacenter - } - return defaultDC -} - -// getQueryTypePartsAndSuffixesFromDNSMessage returns the query type, the parts, and suffixes of the query name. -func getQueryTypePartsAndSuffixesFromDNSMessage(req *dns.Msg, domain, altDomain string) (queryType discovery.QueryType, parts []string, suffixes []string) { - // Get the QName without the domain suffix - // TODO (v2-dns): we will also need to handle the "failover" and "no-failover" suffixes here. - // They come AFTER the domain. See `stripSuffix` in router.go - qName := trimDomainFromQuestionName(req.Question[0].Name, domain, altDomain) - - // Split into the label parts - labels := dns.SplitDomainName(qName) - - done := false - for i := len(labels) - 1; i >= 0 && !done; i-- { - queryType = getQueryTypeFromLabels(labels[i]) - switch queryType { - case discovery.QueryTypeService, discovery.QueryTypeWorkload, - discovery.QueryTypeConnect, discovery.QueryTypeVirtual, discovery.QueryTypeIngress, - discovery.QueryTypeNode, discovery.QueryTypePreparedQuery: - parts = labels[:i] - suffixes = labels[i+1:] - done = true - case discovery.QueryTypeInvalid: - fallthrough - default: - // If this is a SRV query the "service" label is optional, we add it back to use the - // existing code-path. - if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[i], "_") { - queryType = discovery.QueryTypeService - parts = labels[:i+1] - suffixes = labels[i+1:] - done = true - } - } - } - - return queryType, parts, suffixes -} - -// trimDomainFromQuestionName returns the question name without the domain suffix. 
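// Illustrative sketch only (not part of this patch): the label scan in
// getQueryTypePartsAndSuffixesFromDNSMessage above walks the question name
// from right to left, splitting it into query parts and tenancy suffixes
// around the query-type label. A minimal standalone version, assuming
// "consul." as the configured domain and "service" as the type label.
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	qName := dns.CanonicalName("db.primary.service.dc2.consul.")
	trimmed := strings.TrimSuffix(qName, "consul.")

	labels := dns.SplitDomainName(trimmed) // ["db" "primary" "service" "dc2"]
	for i := len(labels) - 1; i >= 0; i-- {
		if labels[i] == "service" {
			fmt.Println("parts:", labels[:i], "suffixes:", labels[i+1:])
			break
		}
	}
}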
-func trimDomainFromQuestionName(questionName, domain, altDomain string) string { - qName := dns.CanonicalName(questionName) - longer := domain - shorter := altDomain - - if len(shorter) > len(longer) { - longer, shorter = shorter, longer - } - - if strings.HasSuffix(qName, "."+strings.TrimLeft(longer, ".")) { - return strings.TrimSuffix(qName, longer) - } - return strings.TrimSuffix(qName, shorter) -} - -// getQueryTypeFromLabels returns the query type from the labels. -func getQueryTypeFromLabels(label string) discovery.QueryType { - switch label { - case "service": - return discovery.QueryTypeService - case "connect": - return discovery.QueryTypeConnect - case "virtual": - return discovery.QueryTypeVirtual - case "ingress": - return discovery.QueryTypeIngress - case "node": - return discovery.QueryTypeNode - case "query": - return discovery.QueryTypePreparedQuery - case "workload": - return discovery.QueryTypeWorkload - default: - return discovery.QueryTypeInvalid - } -} - -// getSourceIP returns the source IP from the dns request. -func getSourceIP(req *dns.Msg, queryType discovery.QueryType, remoteAddr net.Addr) (sourceIP net.IP) { - if queryType == discovery.QueryTypePreparedQuery { - subnet := ednsSubnetForRequest(req) - - if subnet != nil { - sourceIP = subnet.Address - } else { - switch v := remoteAddr.(type) { - case *net.UDPAddr: - sourceIP = v.IP - case *net.TCPAddr: - sourceIP = v.IP - case *net.IPAddr: - sourceIP = v.IP - } - } - } - return sourceIP -} diff --git a/agent/dns/router_query_test.go b/agent/dns/router_query_test.go deleted file mode 100644 index 59e299fe66105..0000000000000 --- a/agent/dns/router_query_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "testing" - - "github.com/miekg/dns" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/discovery" -) - -// testCaseBuildQueryFromDNSMessage is a test case for the buildQueryFromDNSMessage function. -type testCaseBuildQueryFromDNSMessage struct { - name string - request *dns.Msg - requestContext *Context - expectedQuery *discovery.Query -} - -// Test_buildQueryFromDNSMessage tests the buildQueryFromDNSMessage function. -func Test_buildQueryFromDNSMessage(t *testing.T) { - - testCases := []testCaseBuildQueryFromDNSMessage{ - // virtual ip queries - { - name: "test A 'virtual.' query", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "db.virtual.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeVirtual, - QueryPayload: discovery.QueryPayload{ - Name: "db", - Tenancy: discovery.QueryTenancy{}, - }, - }, - }, - { - name: "test A 'virtual.' with kitchen sink labels", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "db.virtual.banana.ns.orange.ap.foo.peer.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeVirtual, - QueryPayload: discovery.QueryPayload{ - Name: "db", - Tenancy: discovery.QueryTenancy{ - Peer: "foo", - Namespace: "banana", - Partition: "orange", - }, - }, - }, - }, - { - name: "test A 'virtual.' 
with implicit peer", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "db.virtual.foo.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeVirtual, - QueryPayload: discovery.QueryPayload{ - Name: "db", - Tenancy: discovery.QueryTenancy{ - Peer: "foo", - }, - }, - }, - }, - { - name: "test A 'virtual.' with implicit peer and namespace query", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "db.virtual.frontend.foo.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeVirtual, - QueryPayload: discovery.QueryPayload{ - Name: "db", - Tenancy: discovery.QueryTenancy{ - Namespace: "frontend", - Peer: "foo", - }, - }, - }, - }, - { - name: "test A 'workload.'", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.workload.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeWorkload, - QueryPayload: discovery.QueryPayload{ - Name: "foo", - Tenancy: discovery.QueryTenancy{}, - }, - }, - }, - { - name: "test A 'workload.' with all possible labels", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.banana.ns.orange.ap.apple.peer.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - requestContext: &Context{ - DefaultDatacenter: "default-dc", - DefaultPartition: "default-partition", - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeWorkload, - QueryPayload: discovery.QueryPayload{ - Name: "foo", - PortName: "api", - Tenancy: discovery.QueryTenancy{ - Namespace: "banana", - Partition: "orange", - Peer: "apple", - Datacenter: "default-dc", - }, - }, - }, - }, - { - name: "test sameness group with all possible labels", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.apple.sg.banana.ns.orange.ap.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - requestContext: &Context{ - DefaultDatacenter: "default-dc", - DefaultPartition: "default-partition", - }, - expectedQuery: &discovery.Query{ - QueryType: discovery.QueryTypeService, - QueryPayload: discovery.QueryPayload{ - Name: "foo", - Tenancy: discovery.QueryTenancy{ - Namespace: "banana", - Partition: "orange", - SamenessGroup: "apple", - Datacenter: "default-dc", - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - context := tc.requestContext - if context == nil { - context = &Context{} - } - query, err := buildQueryFromDNSMessage(tc.request, *context, "consul.", ".", nil) - require.NoError(t, err) - assert.Equal(t, tc.expectedQuery, query) - }) - } -} diff --git a/agent/dns/router_response.go b/agent/dns/router_response.go deleted file mode 100644 index d2000745c8281..0000000000000 --- a/agent/dns/router_response.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 -package dns - -import ( - "fmt" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/go-hclog" - "github.com/miekg/dns" - "math" - "strings" -) - -const ( - // UDP can fit ~25 A records in a 512B response, and ~14 AAAA - // records. Limit further to prevent unintentional configuration - // abuse that would have a negative effect on application response - // times. - maxUDPAnswerLimit = 8 - - defaultMaxUDPSize = 512 - - // If a consumer sets a buffer size greater than this amount we will default it down - // to this amount to ensure that consul does respond. Previously if consumer had a larger buffer - // size than 65535 - 60 bytes (maximim 60 bytes for IP header. UDP header will be offset in the - // trimUDP call) consul would fail to respond and the consumer timesout - // the request. - maxUDPDatagramSize = math.MaxUint16 - 68 -) - -// trimDNSResponse will trim the response for UDP and TCP -func trimDNSResponse(cfg *RouterDynamicConfig, network string, req, resp *dns.Msg, logger hclog.Logger) { - var trimmed bool - originalSize := resp.Len() - originalNumRecords := len(resp.Answer) - if network != "tcp" { - trimmed = trimUDPResponse(req, resp, cfg.UDPAnswerLimit) - } else { - trimmed = trimTCPResponse(req, resp) - } - // Flag that there are more records to return in the UDP response - if trimmed { - if cfg.EnableTruncate { - resp.Truncated = true - } - logger.Debug("DNS response too large, truncated", - "protocol", network, - "question", req.Question, - "records", fmt.Sprintf("%d/%d", len(resp.Answer), originalNumRecords), - "size", fmt.Sprintf("%d/%d", resp.Len(), originalSize), - ) - } -} - -// trimTCPResponse limit the MaximumSize of messages to 64k as it is the limit -// of DNS responses -func trimTCPResponse(req, resp *dns.Msg) (trimmed bool) { - hasExtra := len(resp.Extra) > 0 - // There is some overhead, 65535 does not work - maxSize := 65523 // 64k - 12 bytes DNS raw overhead - - // We avoid some function calls and allocations by only handling the - // extra data when necessary. - var index map[string]dns.RR - - // It is not possible to return more than 4k records even with compression - // Since we are performing binary search it is not a big deal, but it - // improves a bit performance, even with binary search - truncateAt := 4096 - if req.Question[0].Qtype == dns.TypeSRV { - // More than 1024 SRV records do not fit in 64k - truncateAt = 1024 - } - if len(resp.Answer) > truncateAt { - resp.Answer = resp.Answer[:truncateAt] - } - if hasExtra { - index = make(map[string]dns.RR, len(resp.Extra)) - indexRRs(resp.Extra, index) - } - truncated := false - - // This enforces the given limit on 64k, the max limit for DNS messages - for len(resp.Answer) > 1 && resp.Len() > maxSize { - truncated = true - // first try to remove the NS section may be it will truncate enough - if len(resp.Ns) != 0 { - resp.Ns = []dns.RR{} - } - // More than 100 bytes, find with a binary search - if resp.Len()-maxSize > 100 { - bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra) - resp.Answer = resp.Answer[:bestIndex] - } else { - resp.Answer = resp.Answer[:len(resp.Answer)-1] - } - if hasExtra { - syncExtra(index, resp) - } - } - - return truncated -} - -// trimUDPResponse makes sure a UDP response is not longer than allowed by RFC -// 1035. Enforce an arbitrary limit that can be further ratcheted down by -// config, and then make sure the response doesn't exceed 512 bytes. Any extra -// records will be trimmed along with answers. 
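// Illustrative sketch only (not part of this patch): the UDP trimming
// documented immediately above starts from the 512-byte default, grows the
// budget to whatever the client advertised via EDNS0, and clamps it below the
// maximum deliverable datagram size. This standalone example shows that
// calculation with made-up values.
package main

import (
	"fmt"
	"math"

	"github.com/miekg/dns"
)

func main() {
	const defaultMax = 512
	const datagramCap = math.MaxUint16 - 68 // headroom for IP/UDP headers

	req := new(dns.Msg)
	req.SetQuestion("web.service.consul.", dns.TypeSRV)
	req.SetEdns0(4096, false) // client advertises a 4096-byte buffer

	maxSize := defaultMax
	if edns := req.IsEdns0(); edns != nil {
		if size := int(edns.UDPSize()); size > maxSize {
			maxSize = size
		}
	}
	if maxSize > datagramCap {
		maxSize = datagramCap
	}
	fmt.Println("effective response budget:", maxSize) // 4096
}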
-func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) { - numAnswers := len(resp.Answer) - hasExtra := len(resp.Extra) > 0 - maxSize := defaultMaxUDPSize - - // Update to the maximum edns size - if edns := req.IsEdns0(); edns != nil { - if size := edns.UDPSize(); size > uint16(maxSize) { - maxSize = int(size) - } - } - // Overriding maxSize as the maxSize cannot be larger than the - // maxUDPDatagram size. Reliability guarantees disappear > than this amount. - if maxSize > maxUDPDatagramSize { - maxSize = maxUDPDatagramSize - } - - // We avoid some function calls and allocations by only handling the - // extra data when necessary. - var index map[string]dns.RR - if hasExtra { - index = make(map[string]dns.RR, len(resp.Extra)) - indexRRs(resp.Extra, index) - } - - // This cuts UDP responses to a useful but limited number of responses. - maxAnswers := lib.MinInt(maxUDPAnswerLimit, udpAnswerLimit) - compress := resp.Compress - if maxSize == defaultMaxUDPSize && numAnswers > maxAnswers { - // We disable computation of Len ONLY for non-eDNS request (512 bytes) - resp.Compress = false - resp.Answer = resp.Answer[:maxAnswers] - if hasExtra { - syncExtra(index, resp) - } - } - if maxSize == defaultMaxUDPSize && numAnswers > maxAnswers { - // We disable computation of Len ONLY for non-eDNS request (512 bytes) - resp.Compress = false - resp.Answer = resp.Answer[:maxAnswers] - if hasExtra { - syncExtra(index, resp) - } - } - - // This enforces the given limit on the number bytes. The default is 512 as - // per the RFC, but EDNS0 allows for the user to specify larger sizes. Note - // that we temporarily switch to uncompressed so that we limit to a response - // that will not exceed 512 bytes uncompressed, which is more conservative and - // will allow our responses to be compliant even if some downstream server - // uncompresses them. - // Even when size is too big for one single record, try to send it anyway - // (useful for 512 bytes messages). 8 is removed from maxSize to ensure that we account - // for the udp header (8 bytes). - for len(resp.Answer) > 1 && resp.Len() > maxSize-8 { - // first try to remove the NS section may be it will truncate enough - if len(resp.Ns) != 0 { - resp.Ns = []dns.RR{} - } - // More than 100 bytes, find with a binary search - if resp.Len()-maxSize > 100 { - bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra) - resp.Answer = resp.Answer[:bestIndex] - } else { - resp.Answer = resp.Answer[:len(resp.Answer)-1] - } - if hasExtra { - syncExtra(index, resp) - } - } - // For 512 non-eDNS responses, while we compute size non-compressed, - // we send result compressed - resp.Compress = compress - return len(resp.Answer) < numAnswers -} - -// syncExtra takes a DNS response message and sets the extra data to the most -// minimal set needed to cover the answer data. A pre-made index of RRs is given -// so that can be re-used between calls. This assumes that the extra data is -// only used to provide info for SRV records. If that's not the case, then this -// will wipe out any additional data. -func syncExtra(index map[string]dns.RR, resp *dns.Msg) { - extra := make([]dns.RR, 0, len(resp.Answer)) - resolved := make(map[string]struct{}, len(resp.Answer)) - for _, ansRR := range resp.Answer { - srv, ok := ansRR.(*dns.SRV) - if !ok { - continue - } - - // Note that we always use lower case when using the index so - // that compares are not case-sensitive. We don't alter the actual - // RRs we add into the extra section, however. 
- target := strings.ToLower(srv.Target) - - RESOLVE: - if _, ok := resolved[target]; ok { - continue - } - resolved[target] = struct{}{} - - extraRR, ok := index[target] - if ok { - extra = append(extra, extraRR) - if cname, ok := extraRR.(*dns.CNAME); ok { - target = strings.ToLower(cname.Target) - goto RESOLVE - } - } - } - resp.Extra = extra -} - -// dnsBinaryTruncate find the optimal number of records using a fast binary search and return -// it in order to return a DNS answer lower than maxSize parameter. -func dnsBinaryTruncate(resp *dns.Msg, maxSize int, index map[string]dns.RR, hasExtra bool) int { - originalAnswser := resp.Answer - startIndex := 0 - endIndex := len(resp.Answer) + 1 - for endIndex-startIndex > 1 { - median := startIndex + (endIndex-startIndex)/2 - - resp.Answer = originalAnswser[:median] - if hasExtra { - syncExtra(index, resp) - } - aLen := resp.Len() - if aLen <= maxSize { - if maxSize-aLen < 10 { - // We are good, increasing will go out of bounds - return median - } - startIndex = median - } else { - endIndex = median - } - } - return startIndex -} - -// indexRRs populates a map which indexes a given list of RRs by name. NOTE that -// the names are all squashed to lower case so we can perform case-insensitive -// lookups; the RRs are not modified. -func indexRRs(rrs []dns.RR, index map[string]dns.RR) { - for _, rr := range rrs { - name := strings.ToLower(rr.Header().Name) - if _, ok := index[name]; !ok { - index[name] = rr - } - } -} diff --git a/agent/dns/router_service_test.go b/agent/dns/router_service_test.go deleted file mode 100644 index cf78f8687a545..0000000000000 --- a/agent/dns/router_service_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "net" - "testing" - "time" - - "github.com/miekg/dns" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/discovery" -) - -func Test_HandleRequest_ServiceQuestions(t *testing.T) { - testCases := []HandleTestCase{ - // Service Lookup - { - name: "When no data is return from a query, send SOA", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(nil, discovery.ErrNoData). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, discovery.LookupTypeService, reqType) - require.Equal(t, "foo", req.Name) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Ns: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.consul.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - }, - }, - { - // TestDNS_ExternalServiceToConsulCNAMELookup - name: "req type: service / question type: SRV / CNAME required: no", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "alias.service.consul.", - Qtype: dns.TypeSRV, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, - &discovery.QueryPayload{ - Name: "alias", - Tenancy: discovery.QueryTenancy{}, - }, discovery.LookupTypeService). - Return([]*discovery.Result{ - { - Type: discovery.ResultTypeVirtual, - Service: &discovery.Location{Name: "alias", Address: "web.service.consul"}, - Node: &discovery.Location{Name: "web", Address: "web.service.consul"}, - }, - }, - nil).On("FetchEndpoints", mock.Anything, - &discovery.QueryPayload{ - Name: "web", - Tenancy: discovery.QueryTenancy{}, - }, discovery.LookupTypeService). - Return([]*discovery.Result{ - { - Type: discovery.ResultTypeNode, - Service: &discovery.Location{Name: "web", Address: "webnode"}, - Node: &discovery.Location{Name: "webnode", Address: "127.0.0.2"}, - }, - }, nil).On("ValidateRequest", mock.Anything, - mock.Anything).Return(nil).On("NormalizeRequest", mock.Anything) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "alias.service.consul.", - Qtype: dns.TypeSRV, - }, - }, - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "alias.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: 123, - }, - Target: "web.service.consul.", - Priority: 1, - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "web.service.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("127.0.0.2"), - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - runHandleTestCases(t, tc) - }) - } -} diff --git a/agent/dns/router_test.go b/agent/dns/router_test.go deleted file mode 100644 index c96cf752d17bb..0000000000000 --- a/agent/dns/router_test.go +++ /dev/null @@ -1,3464 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "errors" - "fmt" - "net" - "reflect" - "testing" - "time" - - "github.com/hashicorp/consul/internal/dnsutil" - - "github.com/miekg/dns" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/discovery" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/resource" -) - -type HandleTestCase struct { - name string - agentConfig *config.RuntimeConfig // This will override the default test Router Config - configureDataFetcher func(fetcher discovery.CatalogDataFetcher) - validateAndNormalizeExpected bool - configureRecursor func(recursor dnsRecursor) - mockProcessorError error - request *dns.Msg - requestContext *Context - remoteAddress net.Addr - response *dns.Msg -} - -var testSOA = &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Mbox: "hostmaster.consul.", - Serial: uint32(time.Now().Unix()), - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, -} - -func Test_HandleRequest(t *testing.T) { - testCases := []HandleTestCase{ - // recursor queries - { - name: "recursors not configured, non-matching domain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "google.com", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - // configureRecursor: call not expected. - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Rcode: dns.RcodeRefused, - }, - Question: []dns.Question{ - { - Name: "google.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - }, - { - name: "recursors configured, matching domain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "google.com", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - resp := &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: "google.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "google.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - } - recursor.(*mockDnsRecursor).On("handle", - mock.Anything, mock.Anything, mock.Anything).Return(resp, nil) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: "google.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "google.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "recursors configured, no matching domain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "google.com", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - 
DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - recursor.(*mockDnsRecursor).On("handle", mock.Anything, mock.Anything, mock.Anything). - Return(nil, errRecursionFailed) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: false, - Rcode: dns.RcodeServerFailure, - RecursionAvailable: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "google.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - }, - { - name: "recursors configured, unhandled error calling recursors", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "google.com", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - err := errors.New("ahhhhh!!!!") - recursor.(*mockDnsRecursor).On("handle", mock.Anything, mock.Anything, mock.Anything). - Return(nil, err) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: false, - Rcode: dns.RcodeServerFailure, - RecursionAvailable: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "google.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - }, - { - name: "recursors configured, the root domain is handled by the recursor", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: ".", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - // this response is modeled after `dig .` - resp := &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: ".", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: ".", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 86391, - }, - Ns: "a.root-servers.net.", - Serial: 2024012200, - Mbox: "nstld.verisign-grs.com.", - Refresh: 1800, - Retry: 900, - Expire: 604800, - Minttl: 86400, - }, - }, - } - recursor.(*mockDnsRecursor).On("handle", - mock.Anything, mock.Anything, mock.Anything).Return(resp, nil) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: ".", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: ".", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 86391, - }, - Ns: "a.root-servers.net.", - Serial: 2024012200, - Mbox: "nstld.verisign-grs.com.", - Refresh: 1800, - Retry: 900, - Expire: 604800, - Minttl: 86400, - }, - }, - }, - }, - // addr queries - { - name: "test A 'addr.' 
query, ipv4 response", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "c000020a.addr.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("192.0.2.10"), - }, - }, - }, - }, - { - name: "test AAAA 'addr.' query, ipv4 response", - // Since we asked for an AAAA record, the A record that resolves from the address is attached as an extra - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul", - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul.", - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "c000020a.addr.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("192.0.2.10"), - }, - }, - }, - }, - { - name: "test SRV 'addr.' query, ipv4 response", - // Since we asked for a SRV record, the A record that resolves from the address is attached as an extra - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "c000020a.addr.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("192.0.2.10"), - }, - }, - }, - }, - { - name: "test ANY 'addr.' query, ipv4 response", - // The response to ANY should look the same as the A response - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000020a.addr.dc1.consul.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "c000020a.addr.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("192.0.2.10"), - }, - }, - }, - }, - { - name: "test AAAA 'addr.' 
query, ipv6 response", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul", - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Qtype: dns.TypeAAAA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 123, - }, - AAAA: net.ParseIP("2001:db8:1:2:cafe::1337"), - }, - }, - }, - }, - { - name: "test A 'addr.' query, ipv6 response", - // Since we asked for an A record, the AAAA record that resolves from the address is attached as an extra - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 123, - }, - AAAA: net.ParseIP("2001:db8:1:2:cafe::1337"), - }, - }, - }, - }, - { - name: "test SRV 'addr.' query, ipv6 response", - // Since we asked for an SRV record, the AAAA record that resolves from the address is attached as an extra - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 123, - }, - AAAA: net.ParseIP("2001:db8:1:2:cafe::1337"), - }, - }, - }, - }, - { - name: "test ANY 'addr.' query, ipv6 response", - // The response to ANY should look the same as the AAAA response - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: "20010db800010002cafe000000001337.addr.dc1.consul.", - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 123, - }, - AAAA: net.ParseIP("2001:db8:1:2:cafe::1337"), - }, - }, - }, - }, - { - name: "test malformed 'addr.' 
query", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000.addr.dc1.consul", // too short - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Rcode: dns.RcodeNameError, // NXDOMAIN - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000.addr.dc1.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Ns: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.consul.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - }, - }, - // virtual ip queries - we will test just the A record, since the - // AAAA and SRV records are handled the same way and the complete - // set of addr tests above cover the rest of the cases. - { - name: "test A 'virtual.' query, ipv4 response", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "c000020a.virtual.dc1.consul", // "intentionally missing the trailing dot" - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher).On("FetchVirtualIP", - mock.Anything, mock.Anything).Return(&discovery.Result{ - Node: &discovery.Location{Address: "240.0.0.2"}, - Type: discovery.ResultTypeVirtual, - }, nil) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "c000020a.virtual.dc1.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "c000020a.virtual.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("240.0.0.2"), - }, - }, - }, - }, - { - name: "test A 'virtual.' 
query, ipv6 response", - // Since we asked for an A record, the AAAA record that resolves from the address is attached as an extra - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.virtual.dc1.consul", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher).On("FetchVirtualIP", - mock.Anything, mock.Anything).Return(&discovery.Result{ - Node: &discovery.Location{Address: "2001:db8:1:2:cafe::1337"}, - Type: discovery.ResultTypeVirtual, - }, nil) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "20010db800010002cafe000000001337.virtual.dc1.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: "20010db800010002cafe000000001337.virtual.dc1.consul.", - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 123, - }, - AAAA: net.ParseIP("2001:db8:1:2:cafe::1337"), - }, - }, - }, - }, - // SOA Queries - { - name: "vanilla SOA query", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "consul.", - Qtype: dns.TypeSOA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Node: &discovery.Location{Name: "server-one", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - { - Node: &discovery.Location{Name: "server-two", Address: "4.5.6.7"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - }, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, discovery.LookupTypeService, reqType) - require.Equal(t, structs.ConsulServiceName, req.Name) - require.Equal(t, 3, req.Limit) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "consul.", - Qtype: dns.TypeSOA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.consul.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - Ns: []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-one.workload.default.ns.default.ap.consul.", - }, - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-two.workload.default.ns.default.ap.consul.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-one.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-two.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("4.5.6.7"), - }, - }, - }, - }, - { - name: "SOA query against alternate domain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "testdomain.", - Qtype: dns.TypeSOA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSDomain: "consul", - DNSAltDomain: "testdomain", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Node: &discovery.Location{Name: "server-one", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - { - Node: &discovery.Location{Name: "server-two", Address: "4.5.6.7"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }}, - }, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, discovery.LookupTypeService, reqType) - require.Equal(t, structs.ConsulServiceName, req.Name) - require.Equal(t, 3, req.Limit) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "testdomain.", - Qtype: dns.TypeSOA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "testdomain.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.testdomain.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.testdomain.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - Ns: []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "testdomain.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-one.workload.default.ns.default.ap.testdomain.", - }, - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "testdomain.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-two.workload.default.ns.default.ap.testdomain.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-one.workload.default.ns.default.ap.testdomain.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-two.workload.default.ns.default.ap.testdomain.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("4.5.6.7"), - }, - }, - }, - }, - // NS Queries - { - name: "vanilla NS query", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "consul.", - Qtype: dns.TypeNS, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Node: &discovery.Location{Name: "server-one", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - { - Node: &discovery.Location{Name: "server-two", Address: "4.5.6.7"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - }, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, discovery.LookupTypeService, reqType) - require.Equal(t, structs.ConsulServiceName, req.Name) - require.Equal(t, 3, req.Limit) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "consul.", - Qtype: dns.TypeNS, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-one.workload.default.ns.default.ap.consul.", - }, - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-two.workload.default.ns.default.ap.consul.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-one.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-two.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("4.5.6.7"), - }, - }, - }, - }, - { - name: "NS query against alternate domain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "testdomain.", - Qtype: dns.TypeNS, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSDomain: "consul", - DNSAltDomain: "testdomain", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Node: &discovery.Location{Name: "server-one", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - { - Node: &discovery.Location{Name: "server-two", Address: "4.5.6.7"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - }, - }, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, discovery.LookupTypeService, reqType) - require.Equal(t, structs.ConsulServiceName, req.Name) - require.Equal(t, 3, req.Limit) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "testdomain.", - Qtype: dns.TypeNS, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "testdomain.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-one.workload.default.ns.default.ap.testdomain.", - }, - &dns.NS{ - Hdr: dns.RR_Header{ - Name: "testdomain.", - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 123, - }, - Ns: "server-two.workload.default.ns.default.ap.testdomain.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-one.workload.default.ns.default.ap.testdomain.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "server-two.workload.default.ns.default.ap.testdomain.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("4.5.6.7"), - }, - }, - }, - }, - // PTR Lookups - { - name: "PTR lookup for node, query type is ANY", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Service: &discovery.Location{Name: "bar", Address: "foo"}, - Type: discovery.ResultTypeNode, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.node.dc2.consul.", - }, - }, - }, - }, - { - name: "PTR lookup for IPV6 node", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "2001:db8::567:89ab"}, - Service: &discovery.Location{Name: "web", Address: "foo"}, - Type: discovery.ResultTypeNode, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "2001:db8::567:89ab", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.node.dc2.consul.", - }, - }, - }, - }, - { - name: "PTR lookup for invalid IP address", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "257.3.2.1.in-addr.arpa", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeNameError, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "257.3.2.1.in-addr.arpa.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Ns: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.consul.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - }, - }, - { - name: "PTR lookup for invalid subdomain", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.blah.arpa", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeNameError, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.blah.arpa.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Ns: []dns.RR{ - &dns.SOA{ - Hdr: dns.RR_Header{ - Name: "consul.", - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: 4, - }, - Ns: "ns.consul.", - Serial: uint32(time.Now().Unix()), - Mbox: "hostmaster.consul.", - Refresh: 1, - Expire: 3, - Retry: 2, - Minttl: 4, - }, - }, - }, - }, - { - name: "[ENT] PTR Lookup for node w/ peer name in default partition, query type is ANY", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeNode, - Service: &discovery.Location{Name: "foo-web", Address: "foo"}, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - PeerName: "peer1", - Partition: "default", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.node.peer1.peer.default.ap.consul.", - }, - }, - }, - }, - { - name: "[ENT] PTR Lookup for service in default namespace, query type is PTR", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeService, - Service: &discovery.Location{Name: "foo", Address: "foo"}, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - Namespace: "default", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.service.default.dc2.consul.", - }, - }, - }, - }, - { - name: "[ENT] PTR Lookup for service in a non-default namespace, query type is PTR", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-node", Address: "1.2.3.4"}, - Type: discovery.ResultTypeService, - Service: &discovery.Location{Name: "foo", Address: "foo"}, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - Namespace: "bar", - Partition: "baz", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.service.bar.dc2.consul.", - }, - }, - }, - }, - { - name: "[CE] PTR Lookup for node w/ peer name, query type is ANY", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeNode, - Service: &discovery.Location{Name: "foo", Address: "foo"}, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - PeerName: "peer1", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.node.peer1.peer.consul.", - }, - }, - }, - }, - { - name: "[CE] PTR Lookup for service, query type is PTR", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Service: &discovery.Location{Name: "foo", Address: "foo"}, - Type: discovery.ResultTypeService, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc2", - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchRecordsByIp", mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(net.IP) - - require.NotNil(t, req) - require.Equal(t, "1.2.3.4", req.String()) - }) - }, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "4.3.2.1.in-addr.arpa.", - Qtype: dns.TypePTR, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.PTR{ - Hdr: dns.RR_Header{ - Name: "4.3.2.1.in-addr.arpa.", - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - }, - Ptr: "foo.service.dc2.consul.", - }, - }, - }, - }, - // V2 Workload Lookup - { - name: "workload A query w/ port, returns A record", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{}, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). //TODO - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Equal(t, "api", req.PortName) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "api.port.foo.workload.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "workload ANY query w/o port, returns A record", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.workload.consul.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{}, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). 
//TODO - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.workload.consul.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.workload.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "workload A query with namespace, partition, and cluster id; IPV4 address; returns A record", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.workload.bar.ns.baz.ap.dc3.dc.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: "bar", - Partition: "baz", - // We currently don't set the datacenter in any of the V2 results. - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.workload.bar.ns.baz.ap.dc3.dc.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.workload.bar.ns.baz.ap.dc3.dc.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "workload w/hostname address, ANY query (no recursor)", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "foo.example.com"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{}, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). 
//TODO - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Equal(t, "api", req.PortName) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "api.port.foo.workload.consul.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 123, - }, - Target: "foo.example.com.", - }, - }, - }, - }, - { - name: "workload w/hostname address, ANY query (w/ recursor)", - // https://datatracker.ietf.org/doc/html/rfc1034#section-3.6.2 both the CNAME and the A record should be in the answer - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "foo.example.com"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{}, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). //TODO - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Equal(t, "api", req.PortName) - }) - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - resp := &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: "foo.example.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.example.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - } - recursor.(*mockDnsRecursor).On("handle", - mock.Anything, mock.Anything, mock.Anything).Return(resp, nil) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - RecursionAvailable: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "api.port.foo.workload.consul.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 123, - }, - Target: "foo.example.com.", - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.example.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "workload w/hostname address, CNAME query (w/ recursor)", - // https://datatracker.ietf.org/doc/html/rfc1034#section-3.6.2 only the CNAME should be in the answer - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - 
Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeCNAME, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - result := &discovery.Result{ - Node: &discovery.Location{Name: "foo", Address: "foo.example.com"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{}, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchWorkload", mock.Anything, mock.Anything). - Return(result, nil). //TODO - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - - require.Equal(t, "foo", req.Name) - require.Equal(t, "api", req.PortName) - }) - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - resp := &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: "foo.example.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.example.com.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - } - recursor.(*mockDnsRecursor).On("handle", - mock.Anything, mock.Anything, mock.Anything).Return(resp, nil) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - RecursionAvailable: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "api.port.foo.workload.consul.", - Qtype: dns.TypeCNAME, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "api.port.foo.workload.consul.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 123, - }, - Target: "foo.example.com.", - }, - // TODO (v2-dns): this next record is wrong per the RFC-1034 mentioned in the comment above (NET-8060) - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.example.com.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 123, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - // V2 Services - { - name: "A/AAAA Query a service and return multiple A records", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-1", Address: "10.0.0.1"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - // Intentionally not in the mesh - }, - DNS: discovery.DNSConfig{ - Weight: 2, - }, - }, - { - Node: &discovery.Location{Name: "foo-2", Address: "10.0.0.2"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - DNS: 
discovery.DNSConfig{ - Weight: 3, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(results, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - require.Equal(t, discovery.LookupTypeService, reqType) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.1"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.2"), - }, - }, - }, - }, - { - name: "SRV Query with a multi-port service return multiple SRV records", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-1", Address: "10.0.0.1"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - // Intentionally not in the mesh - }, - DNS: discovery.DNSConfig{ - Weight: 2, - }, - }, - { - Node: &discovery.Location{Name: "foo-2", Address: "10.0.0.2"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "mesh", - Number: 21000, - }, - }, - DNS: discovery.DNSConfig{ - Weight: 3, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - require.Equal(t, discovery.LookupTypeService, reqType) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 2, - Priority: 1, - Port: 5678, - Target: "api.port.foo-1.workload.default.ns.default.ap.consul.", - }, - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 3, - Priority: 1, - Port: 5678, - Target: "api.port.foo-2.workload.default.ns.default.ap.consul.", - }, - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 3, - Priority: 1, - Port: 21000, - Target: "mesh.port.foo-2.workload.default.ns.default.ap.consul.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "api.port.foo-1.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.1"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "api.port.foo-2.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.2"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "mesh.port.foo-2.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.2"), - }, - }, - }, - }, - { - name: "SRV Query with a multi-port service where the client requests a specific port, returns SRV and A records", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "mesh.port.foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-2", Address: "10.0.0.2"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "mesh", - Number: 21000, - }, - }, - DNS: discovery.DNSConfig{ - Weight: 3, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, "foo", req.Name) - require.Equal(t, "mesh", req.PortName) - require.Equal(t, discovery.LookupTypeService, reqType) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "mesh.port.foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "mesh.port.foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 3, - Priority: 1, - Port: 21000, - Target: "mesh.port.foo-2.workload.default.ns.default.ap.consul.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "mesh.port.foo-2.workload.default.ns.default.ap.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("10.0.0.2"), - }, - }, - }, - }, - { - name: "SRV Query with a multi-port service that has workloads w/ hostnames (no recursors)", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-1", Address: "foo-1.example.com"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "web", - Number: 8080, - }, - }, - DNS: discovery.DNSConfig{ - Weight: 2, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - require.Equal(t, discovery.LookupTypeService, reqType) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 2, - Priority: 1, - Port: 5678, - Target: "foo-1.example.com.", - }, - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 2, - Priority: 1, - Port: 8080, - Target: "foo-1.example.com.", - }, - }, - }, - }, - { - name: "SRV Query with a multi-port service that has workloads w/ hostnames (no recursor)", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - results := []*discovery.Result{ - { - Node: &discovery.Location{Name: "foo-1", Address: "foo-1.example.com"}, - Type: discovery.ResultTypeWorkload, - Tenancy: discovery.ResultTenancy{ - Namespace: resource.DefaultNamespaceName, - Partition: resource.DefaultPartitionName, - }, - Ports: []discovery.Port{ - { - Name: "api", - Number: 5678, - }, - { - Name: "web", - Number: 8080, - }, - }, - DNS: discovery.DNSConfig{ - Weight: 2, - }, - }, - } - - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything). - Return(results, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - reqType := args.Get(2).(discovery.LookupType) - - require.Equal(t, "foo", req.Name) - require.Empty(t, req.PortName) - require.Equal(t, discovery.LookupTypeService, reqType) - }) - }, - agentConfig: &config.RuntimeConfig{ - DNSRecursors: []string{"8.8.8.8"}, - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - configureRecursor: func(recursor dnsRecursor) { - resp := &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - Rcode: dns.RcodeSuccess, - }, - Question: []dns.Question{ - { - Name: "foo-1.example.com.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo-1.example.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - } - recursor.(*mockDnsRecursor).On("handle", - mock.Anything, mock.Anything, mock.Anything).Return(resp, nil) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - RecursionAvailable: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.service.consul.", - Qtype: dns.TypeSRV, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 2, - Priority: 1, - Port: 5678, - Target: "foo-1.example.com.", - }, - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "foo.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - Weight: 2, - Priority: 1, - Port: 8080, - Target: "foo-1.example.com.", - }, - }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo-1.example.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("1.2.3.4"), - }, - // TODO (v2-dns): This needs to be de-duplicated (NET-8064) - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo-1.example.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: uint32(123), - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - // V1 Prepared Queries - { - name: "v1 prepared query w/ TTL override, ANY query, returns A record", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.query.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - // We shouldn't use this if we have the override defined - DNSServiceTTL: map[string]time.Duration{ - "foo": 1 * time.Second, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchPreparedQuery", mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Service: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Node: &discovery.Location{Name: "bar", Address: "1.2.3.4"}, - Type: discovery.ResultTypeService, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc1", - }, - DNS: discovery.DNSConfig{ - TTL: getUint32Ptr(3), - Weight: 1, - }, - }, - }, nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - require.Equal(t, "foo", req.Name) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.query.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.query.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 3, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - { - name: "v1 prepared query w/ matching service TTL, ANY query, returns A record", - request: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - }, - Question: []dns.Question{ - { - Name: "foo.query.dc1.cluster.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - }, - agentConfig: &config.RuntimeConfig{ - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - // Results should use this as the TTL - DNSServiceTTL: map[string]time.Duration{ - "foo": 1 * time.Second, - }, - }, - configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) { - fetcher.(*discovery.MockCatalogDataFetcher). - On("FetchPreparedQuery", mock.Anything, mock.Anything). - Return([]*discovery.Result{ - { - Service: &discovery.Location{Name: "foo", Address: "1.2.3.4"}, - Node: &discovery.Location{Name: "bar", Address: "1.2.3.4"}, - Type: discovery.ResultTypeService, - Tenancy: discovery.ResultTenancy{ - Datacenter: "dc1", - }, - DNS: discovery.DNSConfig{ - // Intentionally no TTL here. - Weight: 1, - }, - }, - }, nil). - Run(func(args mock.Arguments) { - req := args.Get(1).(*discovery.QueryPayload) - require.Equal(t, "foo", req.Name) - require.Equal(t, "dc1", req.Tenancy.Datacenter) - }) - }, - validateAndNormalizeExpected: true, - response: &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "foo.query.dc1.cluster.consul.", - Qtype: dns.TypeA, - Qclass: dns.ClassINET, - }, - }, - Answer: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "foo.query.dc1.cluster.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 1, - }, - A: net.ParseIP("1.2.3.4"), - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - runHandleTestCases(t, tc) - }) - } -} - -func runHandleTestCases(t *testing.T, tc HandleTestCase) { - cdf := discovery.NewMockCatalogDataFetcher(t) - if tc.validateAndNormalizeExpected { - cdf.On("ValidateRequest", mock.Anything, mock.Anything).Return(nil) - cdf.On("NormalizeRequest", mock.Anything).Return() - } - - if tc.configureDataFetcher != nil { - tc.configureDataFetcher(cdf) - } - cfg := buildDNSConfig(tc.agentConfig, cdf, tc.mockProcessorError) - - router, err := NewRouter(cfg) - require.NoError(t, err) - - // Replace the recursor with a mock and configure - router.recursor = newMockDnsRecursor(t) - if tc.configureRecursor != nil { - tc.configureRecursor(router.recursor) - } - - ctx := tc.requestContext - if ctx == nil { - ctx = &Context{} - } - - var remoteAddress net.Addr - if tc.remoteAddress != nil { - remoteAddress = tc.remoteAddress - } else { - remoteAddress = &net.UDPAddr{} - } - - actual := router.HandleRequest(tc.request, *ctx, remoteAddress) - require.Equal(t, tc.response, actual) -} - 
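For orientation: the table-driven harness removed by the hunk above works by having each HandleTestCase carry a DNS request, optional mock wiring for the catalog data fetcher and recursor, and the expected dns.Msg; runHandleTestCases then builds a Router around those mocks and asserts on the reply from HandleRequest. The following minimal sketch (not part of the patch) shows the shape of one such case, reusing only the types and mock expectations that appear in the removed code; the "example" service name, the example-1 workload, and the 10.1.2.3 address are illustrative placeholders, and the expected answer assumes the default 123-second node TTL set up by buildDNSConfig below.

// Sketch only: a minimal HandleTestCase in the style of the removed table,
// assuming the same package, imports, and helpers shown above.
func Test_HandleRequest_ExampleSketch(t *testing.T) {
	tc := HandleTestCase{
		name: "illustrative service A query returns a single A record",
		request: &dns.Msg{
			MsgHdr: dns.MsgHdr{Opcode: dns.OpcodeQuery},
			Question: []dns.Question{
				{Name: "example.service.consul.", Qtype: dns.TypeA, Qclass: dns.ClassINET},
			},
		},
		configureDataFetcher: func(fetcher discovery.CatalogDataFetcher) {
			// One workload backs the hypothetical service; FetchEndpoints is the same
			// expectation the removed service-lookup cases set on the mock fetcher.
			fetcher.(*discovery.MockCatalogDataFetcher).
				On("FetchEndpoints", mock.Anything, mock.Anything, mock.Anything).
				Return([]*discovery.Result{
					{
						Node: &discovery.Location{Name: "example-1", Address: "10.1.2.3"},
						Type: discovery.ResultTypeWorkload,
						Tenancy: discovery.ResultTenancy{
							Namespace: resource.DefaultNamespaceName,
							Partition: resource.DefaultPartitionName,
						},
					},
				}, nil)
		},
		validateAndNormalizeExpected: true,
		response: &dns.Msg{
			MsgHdr:   dns.MsgHdr{Opcode: dns.OpcodeQuery, Response: true, Authoritative: true},
			Compress: true,
			Question: []dns.Question{
				{Name: "example.service.consul.", Qtype: dns.TypeA, Qclass: dns.ClassINET},
			},
			Answer: []dns.RR{
				&dns.A{
					Hdr: dns.RR_Header{Name: "example.service.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 123},
					A:   net.ParseIP("10.1.2.3"),
				},
			},
		},
	}
	t.Run(tc.name, func(t *testing.T) { runHandleTestCases(t, tc) })
}

The exact expected message may need adjusting against the real router behavior; the point of the sketch is how a case plugs its mocks and expected reply into runHandleTestCases.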
-func TestRouterDynamicConfig_GetTTLForService(t *testing.T) { - type testCase struct { - name string - inputKey string - shouldMatch bool - expectedDuration time.Duration - } - - testCases := []testCase{ - { - name: "strict match", - inputKey: "foo", - shouldMatch: true, - expectedDuration: 1 * time.Second, - }, - { - name: "wildcard match", - inputKey: "bar", - shouldMatch: true, - expectedDuration: 2 * time.Second, - }, - { - name: "wildcard match 2", - inputKey: "bart", - shouldMatch: true, - expectedDuration: 2 * time.Second, - }, - { - name: "no match", - inputKey: "homer", - shouldMatch: false, - expectedDuration: 0 * time.Second, - }, - } - - rtCfg := &config.RuntimeConfig{ - DNSServiceTTL: map[string]time.Duration{ - "foo": 1 * time.Second, - "bar*": 2 * time.Second, - }, - } - cfg, err := getDynamicRouterConfig(rtCfg) - require.NoError(t, err) - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual, ok := cfg.getTTLForService(tc.inputKey) - require.Equal(t, tc.shouldMatch, ok) - require.Equal(t, tc.expectedDuration, actual) - }) - } -} -func buildDNSConfig(agentConfig *config.RuntimeConfig, cdf discovery.CatalogDataFetcher, _ error) Config { - cfg := Config{ - AgentConfig: &config.RuntimeConfig{ - DNSDomain: "consul", - DNSNodeTTL: 123 * time.Second, - DNSSOA: config.RuntimeSOAConfig{ - Refresh: 1, - Retry: 2, - Expire: 3, - Minttl: 4, - }, - DNSUDPAnswerLimit: maxUDPAnswerLimit, - }, - EntMeta: acl.EnterpriseMeta{}, - Logger: hclog.NewNullLogger(), - Processor: discovery.NewQueryProcessor(cdf), - TokenFunc: func() string { return "" }, - TranslateServiceAddressFunc: func(dc string, address string, taggedAddresses map[string]structs.ServiceAddress, accept dnsutil.TranslateAddressAccept) string { - return address - }, - TranslateAddressFunc: func(dc string, addr string, taggedAddresses map[string]string, accept dnsutil.TranslateAddressAccept) string { - return addr - }, - } - - if agentConfig != nil { - cfg.AgentConfig = agentConfig - } - - return cfg -} - -// TestDNS_BinaryTruncate tests the dnsBinaryTruncate function. 
-func TestDNS_BinaryTruncate(t *testing.T) { - msgSrc := new(dns.Msg) - msgSrc.Compress = true - msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV) - - for i := 0; i < 5000; i++ { - target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256) - msgSrc.Answer = append(msgSrc.Answer, &dns.SRV{Hdr: dns.RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: dns.TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msgSrc.Extra = append(msgSrc.Extra, &dns.CNAME{Hdr: dns.RR_Header{Name: target, Class: 1, Rrtype: dns.TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)}) - } - for _, compress := range []bool{true, false} { - for idx, maxSize := range []int{12, 256, 512, 8192, 65535} { - t.Run(fmt.Sprintf("binarySearch %d", maxSize), func(t *testing.T) { - msg := new(dns.Msg) - msgSrc.Compress = compress - msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV) - msg.Answer = msgSrc.Answer - msg.Extra = msgSrc.Extra - msg.Ns = msgSrc.Ns - index := make(map[string]dns.RR, len(msg.Extra)) - indexRRs(msg.Extra, index) - blen := dnsBinaryTruncate(msg, maxSize, index, true) - msg.Answer = msg.Answer[:blen] - syncExtra(index, msg) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Fatalf("Bug in DNS library: %d != %d", predicted, len(buf)) - } - if len(buf) > maxSize || (idx != 0 && len(buf) < 16) { - t.Fatalf("bad[%d]: %d > %d", idx, len(buf), maxSize) - } - }) - } - } -} - -// TestDNS_syncExtra tests the syncExtra function. -func TestDNS_syncExtra(t *testing.T) { - resp := &dns.Msg{ - Answer: []dns.RR{ - // These two are on the same host so the redundant extra - // records should get deduplicated. - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1001, - Target: "ip-10-0-1-185.node.dc1.consul.", - }, - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1002, - Target: "ip-10-0-1-185.node.dc1.consul.", - }, - // This one isn't in the Consul domain so it will get a - // CNAME and then an A record from the recursor. - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1003, - Target: "demo.consul.io.", - }, - // This one isn't in the Consul domain and it will get - // a CNAME and A record from a recursor that alters the - // case of the name. This proves we look up in the index - // in a case-insensitive way. - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1001, - Target: "insensitive.consul.io.", - }, - // This is also a CNAME, but it'll be set up to loop to - // make sure we don't crash. - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1001, - Target: "deadly.consul.io.", - }, - // This is also a CNAME, but it won't have another record. - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Port: 1001, - Target: "nope.consul.io.", - }, - }, - Extra: []dns.RR{ - // These should get deduplicated. 
- &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-185.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.185"), - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-185.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.185"), - }, - // This is a normal CNAME followed by an A record but we - // have flipped the order. The algorithm should emit them - // in the opposite order. - &dns.A{ - Hdr: dns.RR_Header{ - Name: "fakeserver.consul.io.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("127.0.0.1"), - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "demo.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "fakeserver.consul.io.", - }, - // These differ in case to test case insensitivity. - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "INSENSITIVE.CONSUL.IO.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "Another.Server.Com.", - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "another.server.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("127.0.0.1"), - }, - // This doesn't appear in the answer, so should get - // dropped. - &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-186.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.186"), - }, - // These two test edge cases with CNAME handling. - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "deadly.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "deadly.consul.io.", - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "nope.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "notthere.consul.io.", - }, - }, - } - - index := make(map[string]dns.RR) - indexRRs(resp.Extra, index) - syncExtra(index, resp) - - expected := &dns.Msg{ - Answer: resp.Answer, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-185.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.185"), - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "demo.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "fakeserver.consul.io.", - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "fakeserver.consul.io.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("127.0.0.1"), - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "INSENSITIVE.CONSUL.IO.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "Another.Server.Com.", - }, - &dns.A{ - Hdr: dns.RR_Header{ - Name: "another.server.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("127.0.0.1"), - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "deadly.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "deadly.consul.io.", - }, - &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: "nope.consul.io.", - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - }, - Target: "notthere.consul.io.", - }, - }, - } - if !reflect.DeepEqual(resp, expected) { - t.Fatalf("Bad %#v vs. %#v", *resp, *expected) - } -} - -// getUint32Ptr return the pointer of an uint32 literal -func getUint32Ptr(i uint32) *uint32 { - return &i -} diff --git a/agent/dns/server.go b/agent/dns/server.go deleted file mode 100644 index 74da3fa663253..0000000000000 --- a/agent/dns/server.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "fmt" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/dnsutil" - "net" - - "github.com/miekg/dns" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/logging" -) - -// DNSRouter is a mock for Router that can be used for testing. -// -//go:generate mockery --name DNSRouter --inpackage -type DNSRouter interface { - HandleRequest(req *dns.Msg, reqCtx Context, remoteAddress net.Addr) *dns.Msg - ServeDNS(w dns.ResponseWriter, req *dns.Msg) - ReloadConfig(newCfg *config.RuntimeConfig) error -} - -// Server is used to expose service discovery queries using a DNS interface. -// It implements the agent.dnsServer interface. -type Server struct { - *dns.Server // Used for setting up listeners - Router DNSRouter // Used to routes and parse DNS requests - - logger hclog.Logger -} - -// Config represent all the DNS configuration required to construct a DNS server. -type Config struct { - AgentConfig *config.RuntimeConfig - EntMeta acl.EnterpriseMeta - Logger hclog.Logger - Processor DiscoveryQueryProcessor - TokenFunc func() string - TranslateAddressFunc func(dc string, addr string, taggedAddresses map[string]string, accept dnsutil.TranslateAddressAccept) string - TranslateServiceAddressFunc func(dc string, address string, taggedAddresses map[string]structs.ServiceAddress, accept dnsutil.TranslateAddressAccept) string -} - -// NewServer creates a new DNS server. -func NewServer(config Config) (*Server, error) { - router, err := NewRouter(config) - if err != nil { - return nil, fmt.Errorf("error creating DNS router: %w", err) - } - - srv := &Server{ - Router: router, - logger: config.Logger.Named(logging.DNS), - } - return srv, nil -} - -// ListenAndServe starts the DNS server. -func (d *Server) ListenAndServe(network, addr string, notif func()) error { - d.Server = &dns.Server{ - Addr: addr, - Net: network, - Handler: d.Router, - NotifyStartedFunc: notif, - } - if network == "udp" { - d.UDPSize = 65535 - } - return d.Server.ListenAndServe() -} - -// ReloadConfig hot-reloads the server config with new parameters under config.RuntimeConfig.DNS* -func (d *Server) ReloadConfig(newCfg *config.RuntimeConfig) error { - return d.Router.ReloadConfig(newCfg) -} - -// Shutdown stops the DNS server. -func (d *Server) Shutdown() { - if d.Server != nil { - d.logger.Info("Stopping server", - "protocol", "DNS", - "address", d.Server.Addr, - "network", d.Server.Net, - ) - err := d.Server.Shutdown() - if err != nil { - d.logger.Error("Error stopping DNS server", "error", err) - } - } -} - -// GetAddr is a function to return the server address if is not nil. -func (d *Server) GetAddr() string { - if d.Server != nil { - return d.Server.Addr - } - return "" -} diff --git a/agent/dns/validation.go b/agent/dns/validation.go new file mode 100644 index 0000000000000..cd66acf6fa8fe --- /dev/null +++ b/agent/dns/validation.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dns + +import ( + "errors" + "regexp" +) + +// matches valid DNS labels according to RFC 1123, should be at most 63 +// characters according to the RFC +var validLabel = regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$`) + +// IsValidLabel returns true if the string given is a valid DNS label (RFC 1123). 
+// Note: the only difference between RFC 1035 and RFC 1123 labels is that in +// RFC 1123 labels can begin with a number. +func IsValidLabel(name string) bool { + return validLabel.MatchString(name) +} + +// ValidateLabel is similar to IsValidLabel except it returns an error +// instead of false when name is not a valid DNS label. The error will contain +// reference to what constitutes a valid DNS label. +func ValidateLabel(name string) error { + if !IsValidLabel(name) { + return errors.New("a valid DNS label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character") + } + return nil +} diff --git a/agent/dns/validation_test.go b/agent/dns/validation_test.go new file mode 100644 index 0000000000000..bcb65adf14a23 --- /dev/null +++ b/agent/dns/validation_test.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dns_test + +import ( + "testing" + + "github.com/hashicorp/consul/agent/dns" + "github.com/stretchr/testify/require" +) + +func TestValidLabel(t *testing.T) { + cases := map[string]bool{ + "CrEaTeD": true, + "created": true, + "create-deleted": true, + "foo": true, + "": false, + "_foo_": false, + "-foo": false, + "foo-": false, + "-foo-": false, + "-foo-bar-": false, + "no spaces allowed": false, + "thisvaluecontainsalotofcharactersbutnottoomanyandthecaseisatrue": true, // 63 chars + "thisvaluecontainstoomanycharactersandisthusinvalidandtestisfalse": false, // 64 chars + } + + t.Run("*", func(t *testing.T) { + t.Run("IsValidLabel", func(t *testing.T) { + require.False(t, dns.IsValidLabel("*")) + }) + t.Run("ValidateLabel", func(t *testing.T) { + require.Error(t, dns.ValidateLabel("*")) + }) + }) + + for name, expect := range cases { + t.Run(name, func(t *testing.T) { + t.Run("IsValidDNSLabel", func(t *testing.T) { + require.Equal(t, expect, dns.IsValidLabel(name)) + }) + t.Run("ValidateLabel", func(t *testing.T) { + if expect { + require.NoError(t, dns.ValidateLabel(name)) + } else { + require.Error(t, dns.ValidateLabel(name)) + } + }) + }) + } +} diff --git a/agent/dns_catalogv2_test.go b/agent/dns_catalogv2_test.go deleted file mode 100644 index c806e59101f62..0000000000000 --- a/agent/dns_catalogv2_test.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "context" - "fmt" - "net" - "testing" - - "github.com/miekg/dns" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/testrpc" -) - -// Similar to TestDNS_ServiceLookup, but removes config for features unsupported in v2 and -// tests against DNS v2 and Catalog v2 explicitly using a resource API client. -func TestDNS_CatalogV2_Basic(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - var err error - a := NewTestAgent(t, `experiments=["resource-apis"]`) // v2dns is implicit w/ resource-apis - defer a.Shutdown() - - testrpc.WaitForRaftLeader(t, a.RPC, "dc1") - - client := a.delegate.ResourceServiceClient() - - // Smoke test for `consul-server` service. 
- readResource(t, client, &pbresource.ID{ - Name: structs.ConsulServiceName, - Type: pbcatalog.ServiceType, - Tenancy: resource.DefaultNamespacedTenancy(), - }, new(pbcatalog.Service)) - - // Register a new service. - dbServiceId := &pbresource.ID{ - Name: "db", - Type: pbcatalog.ServiceType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - emptyServiceId := &pbresource.ID{ - Name: "empty", - Type: pbcatalog.ServiceType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - dbService := &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{ - Prefixes: []string{"db-"}, - }, - Ports: []*pbcatalog.ServicePort{ - { - TargetPort: "tcp", - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - }, - { - TargetPort: "admin", - Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, - }, - { - TargetPort: "mesh", - Protocol: pbcatalog.Protocol_PROTOCOL_MESH, - }, - }, - } - emptyService := &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{ - Prefixes: []string{"empty-"}, - }, - Ports: []*pbcatalog.ServicePort{ - { - TargetPort: "tcp", - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - }, - { - TargetPort: "admin", - Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, - }, - { - TargetPort: "mesh", - Protocol: pbcatalog.Protocol_PROTOCOL_MESH, - }, - }, - } - dbServiceResource := &pbresource.Resource{ - Id: dbServiceId, - Data: toAny(t, dbService), - } - emptyServiceResource := &pbresource.Resource{ - Id: emptyServiceId, - Data: toAny(t, emptyService), - } - for _, r := range []*pbresource.Resource{dbServiceResource, emptyServiceResource} { - _, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: r}) - if err != nil { - t.Fatalf("failed to create the %s service: %v", r.Id.Name, err) - } - } - - // Validate services written. - readResource(t, client, dbServiceId, new(pbcatalog.Service)) - readResource(t, client, emptyServiceId, new(pbcatalog.Service)) - - // Register workloads. 
- dbWorkloadId1 := &pbresource.ID{ - Name: "db-1", - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - dbWorkloadId2 := &pbresource.ID{ - Name: "db-2", - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - dbWorkloadId3 := &pbresource.ID{ - Name: "db-3", - Type: pbcatalog.WorkloadType, - Tenancy: resource.DefaultNamespacedTenancy(), - } - dbWorkloadPorts := map[string]*pbcatalog.WorkloadPort{ - "tcp": { - Port: 12345, - Protocol: pbcatalog.Protocol_PROTOCOL_TCP, - }, - "admin": { - Port: 23456, - Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, - }, - "mesh": { - Port: 20000, - Protocol: pbcatalog.Protocol_PROTOCOL_MESH, - }, - } - dbWorkloadFn := func(ip string) *pbcatalog.Workload { - return &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: ip, - }, - }, - Identity: "test-identity", - Ports: dbWorkloadPorts, - } - } - dbWorkload1 := dbWorkloadFn("172.16.1.1") - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{ - Id: dbWorkloadId1, - Data: toAny(t, dbWorkload1), - }}) - if err != nil { - t.Fatalf("failed to create the %s workload: %v", dbWorkloadId1.Name, err) - } - dbWorkload2 := dbWorkloadFn("172.16.1.2") - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{ - Id: dbWorkloadId2, - Data: toAny(t, dbWorkload2), - }}) - if err != nil { - t.Fatalf("failed to create the %s workload: %v", dbWorkloadId2.Name, err) - } - dbWorkload3 := dbWorkloadFn("2001:db8:85a3::8a2e:370:7334") // test IPv6 - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: &pbresource.Resource{ - Id: dbWorkloadId3, - Data: toAny(t, dbWorkload3), - }}) - if err != nil { - t.Fatalf("failed to create the %s workload: %v", dbWorkloadId2.Name, err) - } - - // Validate workloads written. - dbWorkloads := make(map[string]*pbcatalog.Workload) - dbWorkloads["db-1"] = readResource(t, client, dbWorkloadId1, new(pbcatalog.Workload)).(*pbcatalog.Workload) - dbWorkloads["db-2"] = readResource(t, client, dbWorkloadId2, new(pbcatalog.Workload)).(*pbcatalog.Workload) - dbWorkloads["db-3"] = readResource(t, client, dbWorkloadId3, new(pbcatalog.Workload)).(*pbcatalog.Workload) - - // Ensure endpoints exist and have health status, which is required for inclusion in DNS results. - retry.Run(t, func(r *retry.R) { - endpoints := readResource(r, client, resource.ReplaceType(pbcatalog.ServiceEndpointsType, dbServiceId), new(pbcatalog.ServiceEndpoints)).(*pbcatalog.ServiceEndpoints) - require.Equal(r, 3, len(endpoints.GetEndpoints())) - for _, e := range endpoints.GetEndpoints() { - require.True(r, - // We only return results for passing and warning health checks. - e.HealthStatus == pbcatalog.Health_HEALTH_PASSING || e.HealthStatus == pbcatalog.Health_HEALTH_WARNING, - fmt.Sprintf("unexpected health status: %v", e.HealthStatus)) - } - }) - - // Test UDP and TCP clients. - for _, client := range []*dns.Client{ - newDNSClient(false), - newDNSClient(true), - } { - // Lookup a service without matching workloads, we should receive an SOA and no answers. 
- questions := []string{ - "empty.service.consul.", - "_empty._tcp.service.consul.", - } - for _, question := range questions { - for _, dnsType := range []uint16{dns.TypeSRV, dns.TypeA, dns.TypeAAAA} { - m := new(dns.Msg) - m.SetQuestion(question, dnsType) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - require.Equal(t, 0, len(in.Answer), "Bad: %s", in.String()) - require.Equal(t, 0, len(in.Extra), "Bad: %s", in.String()) - require.Equal(t, 1, len(in.Ns), "Bad: %s", in.String()) - - soaRec, ok := in.Ns[0].(*dns.SOA) - require.True(t, ok, "Bad: %s", in.Ns[0].String()) - require.EqualValues(t, 0, soaRec.Hdr.Ttl, "Bad: %s", in.Ns[0].String()) - } - } - - // Look up the service directly including all ports. - questions = []string{ - "db.service.consul.", - "_db._tcp.service.consul.", // RFC 2782 query. All ports are TCP, so this should return the same result. - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // This check only runs for a TCP client because a UDP client will truncate the response. - if client.Net == "tcp" { - for portName, port := range dbWorkloadPorts { - for workloadName, workload := range dbWorkloads { - workloadTarget := fmt.Sprintf("%s.port.%s.workload.default.ns.default.ap.consul.", portName, workloadName) - workloadHost := workload.Addresses[0].Host - - srvRec := findSrvAnswerForTarget(t, in, workloadTarget) - require.EqualValues(t, port.Port, srvRec.Port, "Bad: %s", srvRec.String()) - require.EqualValues(t, 0, srvRec.Hdr.Ttl, "Bad: %s", srvRec.String()) - - a := findAorAAAAForName(t, in, in.Extra, workloadTarget) - require.Equal(t, workloadHost, a.AorAAAA.String(), "Bad: %s", a.Original.String()) - require.EqualValues(t, 0, a.Hdr.Ttl, "Bad: %s", a.Original.String()) - } - } - - // Expect 1 result per port, per workload. - require.Equal(t, 9, len(in.Answer), "answer count did not match expected\n\n%s", in.String()) - require.Equal(t, 9, len(in.Extra), "extra answer count did not match expected\n\n%s", in.String()) - } else { - // Expect 1 result per port, per workload, up to the default limit of 3. In practice the results are truncated at 2. - require.Equal(t, 2, len(in.Answer), "answer count did not match expected\n\n%s", in.String()) - require.Equal(t, 2, len(in.Extra), "extra answer count did not match expected\n\n%s", in.String()) - } - } - - // Look up the service directly by each port. - for portName, port := range dbWorkloadPorts { - question := fmt.Sprintf("%s.port.db.service.consul.", portName) - - for workloadName, workload := range dbWorkloads { - workloadTarget := fmt.Sprintf("%s.port.%s.workload.default.ns.default.ap.consul.", portName, workloadName) - workloadHost := workload.Addresses[0].Host - - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - srvRec := findSrvAnswerForTarget(t, in, workloadTarget) - require.EqualValues(t, port.Port, srvRec.Port, "Bad: %s", srvRec.String()) - require.EqualValues(t, 0, srvRec.Hdr.Ttl, "Bad: %s", srvRec.String()) - - a := findAorAAAAForName(t, in, in.Extra, workloadTarget) - require.Equal(t, workloadHost, a.AorAAAA.String(), "Bad: %s", a.Original.String()) - require.EqualValues(t, 0, a.Hdr.Ttl, "Bad: %s", a.Original.String()) - - // Expect 1 result per port. 
- require.Equal(t, 3, len(in.Answer), "answer count did not match expected\n\n%s", in.String()) - require.Equal(t, 3, len(in.Extra), "extra answer count did not match expected\n\n%s", in.String()) - } - } - - // Look up A/AAAA by service. - questions = []string{ - "db.service.consul.", - } - for _, question := range questions { - for workloadName, dnsType := range map[string]uint16{ - "db-1": dns.TypeA, - "db-2": dns.TypeA, - "db-3": dns.TypeAAAA, - } { - workload := dbWorkloads[workloadName] - - m := new(dns.Msg) - m.SetQuestion(question, dnsType) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - workloadHost := workload.Addresses[0].Host - - a := findAorAAAAForAddress(t, in, in.Answer, workloadHost) - require.Equal(t, question, a.Hdr.Name, "Bad: %s", a.Original.String()) - require.EqualValues(t, 0, a.Hdr.Ttl, "Bad: %s", a.Original.String()) - - // Expect 1 answer per workload. For A records, expect 2 answers because there's 2 IPv4 workloads. - if dnsType == dns.TypeA { - require.Equal(t, 2, len(in.Answer), "answer count did not match expected\n\n%s", in.String()) - } else { - require.Equal(t, 1, len(in.Answer), "answer count did not match expected\n\n%s", in.String()) - } - require.Equal(t, 0, len(in.Extra), "extra answer count did not match expected\n\n%s", in.String()) - } - } - - // Lookup a non-existing service, we should receive an SOA. - questions = []string{ - "nodb.service.consul.", - "nope.query.consul.", // prepared query is not supported in v2 - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - require.Equal(t, 1, len(in.Ns), "Bad: %s", in.String()) - - soaRec, ok := in.Ns[0].(*dns.SOA) - require.True(t, ok, "Bad: %s", in.Ns[0].String()) - require.EqualValues(t, 0, soaRec.Hdr.Ttl, "Bad: %s", in.Ns[0].String()) - } - - // Lookup workloads directly with a port. - for workloadName, dnsType := range map[string]uint16{ - "db-1": dns.TypeA, - "db-2": dns.TypeA, - "db-3": dns.TypeAAAA, - } { - for _, question := range []string{ - fmt.Sprintf("%s.workload.default.ns.default.ap.consul.", workloadName), - fmt.Sprintf("tcp.port.%s.workload.default.ns.default.ap.consul.", workloadName), - fmt.Sprintf("admin.port.%s.workload.default.ns.default.ap.consul.", workloadName), - fmt.Sprintf("mesh.port.%s.workload.default.ns.default.ap.consul.", workloadName), - } { - workload := dbWorkloads[workloadName] - workloadHost := workload.Addresses[0].Host - - m := new(dns.Msg) - m.SetQuestion(question, dnsType) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - require.Equal(t, 1, len(in.Answer), "Bad: %s", in.String()) - - a := findAorAAAAForName(t, in, in.Answer, question) - require.Equal(t, workloadHost, a.AorAAAA.String(), "Bad: %s", a.Original.String()) - require.EqualValues(t, 0, a.Hdr.Ttl, "Bad: %s", a.Original.String()) - } - } - - // Lookup a non-existing workload, we should receive an NXDOMAIN response. - for _, aType := range []uint16{dns.TypeA, dns.TypeAAAA} { - question := "unknown.workload.consul." 
- - m := new(dns.Msg) - m.SetQuestion(question, aType) - - in, _, err := client.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - require.Equal(t, 0, len(in.Answer), "Bad: %s", in.String()) - require.Equal(t, dns.RcodeNameError, in.Rcode, "Bad: %s", in.String()) - } - } -} - -func findSrvAnswerForTarget(t *testing.T, in *dns.Msg, target string) *dns.SRV { - t.Helper() - - for _, a := range in.Answer { - srvRec, ok := a.(*dns.SRV) - if ok && srvRec.Target == target { - return srvRec - } - } - t.Fatalf("could not find SRV record for target: %s\n\n%s", target, in.String()) - return nil -} - -func findAorAAAAForName(t *testing.T, in *dns.Msg, rrs []dns.RR, name string) *dnsAOrAAAA { - t.Helper() - - for _, rr := range rrs { - a := newAOrAAAA(t, rr) - if a.Hdr.Name == name { - return a - } - } - t.Fatalf("could not find A/AAAA record for name: %s\n\n%+v", name, in.String()) - return nil -} - -func findAorAAAAForAddress(t *testing.T, in *dns.Msg, rrs []dns.RR, address string) *dnsAOrAAAA { - t.Helper() - - for _, rr := range rrs { - a := newAOrAAAA(t, rr) - if a.AorAAAA.String() == address { - return a - } - } - t.Fatalf("could not find A/AAAA record for address: %s\n\n%+v", address, in.String()) - return nil -} - -func readResource(t retry.TestingTB, client pbresource.ResourceServiceClient, id *pbresource.ID, m proto.Message) proto.Message { - t.Helper() - - retry.Run(t, func(r *retry.R) { - res, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id}) - if err != nil { - r.Fatalf("err: %v", err) - } - data := res.GetResource() - require.NotEmpty(r, data) - - err = data.Data.UnmarshalTo(m) - require.NoError(r, err) - }) - - return m -} - -func toAny(t retry.TestingTB, m proto.Message) *anypb.Any { - t.Helper() - a, err := anypb.New(m) - if err != nil { - t.Fatalf("could not convert proto to `any` message: %v", err) - } - return a -} - -// dnsAOrAAAA unifies A and AAAA records for simpler testing when the IP type doesn't matter. -type dnsAOrAAAA struct { - Original dns.RR - Hdr dns.RR_Header - AorAAAA net.IP - isAAAA bool -} - -func newAOrAAAA(t *testing.T, rr dns.RR) *dnsAOrAAAA { - t.Helper() - - if aRec, ok := rr.(*dns.A); ok { - return &dnsAOrAAAA{ - Original: rr, - Hdr: aRec.Hdr, - AorAAAA: aRec.A, - isAAAA: false, - } - } - if aRec, ok := rr.(*dns.AAAA); ok { - return &dnsAOrAAAA{ - Original: rr, - Hdr: aRec.Hdr, - AorAAAA: aRec.AAAA, - isAAAA: true, - } - } - - t.Fatalf("Bad A or AAAA record: %#v", rr) - return nil -} - -func newDNSClient(tcp bool) *dns.Client { - c := new(dns.Client) - - // Use TCP to avoid truncation of larger responses and - // sidestep the default UDP size limit of 3 answers - // set by config.DefaultSource() in agent/config/default.go. - if tcp { - c.Net = "tcp" - } - - return c -} diff --git a/agent/dns_ce.go b/agent/dns_ce.go index 4eb74442fa40b..8c055776ed997 100644 --- a/agent/dns_ce.go +++ b/agent/dns_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent @@ -12,10 +13,6 @@ import ( "github.com/hashicorp/consul/agent/config" ) -// NOTE: these functions have also been copied to agent/dns package for dns v2. -// If you change these functions, please also change the ones in agent/dns as well. -// These v1 versions will soon be deprecated. 
- type enterpriseDNSConfig struct{} func getEnterpriseDNSConfig(conf *config.RuntimeConfig) enterpriseDNSConfig { diff --git a/agent/dns_ce_test.go b/agent/dns_ce_test.go index 44e30285628b2..920568cd3b9bd 100644 --- a/agent/dns_ce_test.go +++ b/agent/dns_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent @@ -9,12 +10,11 @@ import ( "context" "testing" - "github.com/miekg/dns" - "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" + "github.com/miekg/dns" + "github.com/stretchr/testify/require" ) func TestDNS_CE_PeeredServices(t *testing.T) { @@ -22,164 +22,110 @@ func TestDNS_CE_PeeredServices(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true } ` + experimentsHCL}) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - - makeReq := func() *structs.RegisterRequest { - return &structs.RegisterRequest{ - PeerName: "peer1", - Datacenter: "dc1", - Node: "peernode1", - Address: "198.18.1.1", - Service: &structs.NodeService{ - PeerName: "peer1", - Kind: structs.ServiceKindConnectProxy, - Service: "web-proxy", - Address: "199.0.0.1", - Port: 12345, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "peer-web", - }, - EnterpriseMeta: *acl.DefaultEnterpriseMeta(), - }, - EnterpriseMeta: *acl.DefaultEnterpriseMeta(), - } - } - - dnsQuery := func(t *testing.T, question string, typ uint16) *dns.Msg { - m := new(dns.Msg) - m.SetQuestion(question, typ) - - c := new(dns.Client) - reply, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, reply.Answer, 1, "zero valid records found for %q", question) - return reply - } - - assertARec := func(t *testing.T, rec dns.RR, expectName, expectIP string) { - aRec, ok := rec.(*dns.A) - require.True(t, ok, "Extra is not an A record: %T", rec) - require.Equal(t, expectName, aRec.Hdr.Name) - require.Equal(t, expectIP, aRec.A.String()) - } - - assertSRVRec := func(t *testing.T, rec dns.RR, expectName string, expectPort uint16) { - srvRec, ok := rec.(*dns.SRV) - require.True(t, ok, "Answer is not a SRV record: %T", rec) - require.Equal(t, expectName, srvRec.Target) - require.Equal(t, expectPort, srvRec.Port) - } - - t.Run("srv-with-addr-reply", func(t *testing.T) { - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", makeReq(), &struct{}{})) - q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 1) - - addr := "c7000001.addr.consul." - assertSRVRec(t, q.Answer[0], addr, 12345) - assertARec(t, q.Extra[0], addr, "199.0.0.1") - - // Query the addr to make sure it's also valid. 
- q = dnsQuery(t, addr, dns.TypeA) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 0) - assertARec(t, q.Answer[0], addr, "199.0.0.1") - }) - - t.Run("srv-with-node-reply", func(t *testing.T) { - req := makeReq() - // Clear service address to trigger node response - req.Service.Address = "" - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", req, &struct{}{})) - q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 1) - - nodeName := "peernode1.node.peer1.peer.consul." - assertSRVRec(t, q.Answer[0], nodeName, 12345) - assertARec(t, q.Extra[0], nodeName, "198.18.1.1") - - // Query the node to make sure it's also valid. - q = dnsQuery(t, nodeName, dns.TypeA) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 0) - assertARec(t, q.Answer[0], nodeName, "198.18.1.1") - }) - - t.Run("srv-with-fqdn-reply", func(t *testing.T) { - req := makeReq() - // Set non-ip address to trigger external response - req.Address = "localhost" - req.Service.Address = "" - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", req, &struct{}{})) - q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 0) - assertSRVRec(t, q.Answer[0], "localhost.", 12345) - }) - - t.Run("a-reply", func(t *testing.T) { - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", makeReq(), &struct{}{})) - q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeA) - require.Len(t, q.Answer, 1) - require.Len(t, q.Extra, 0) - assertARec(t, q.Answer[0], "web-proxy.service.peer1.peer.consul.", "199.0.0.1") - }) - }) + a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true }`}) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + makeReq := func() *structs.RegisterRequest { + return &structs.RegisterRequest{ + PeerName: "peer1", + Datacenter: "dc1", + Node: "peernode1", + Address: "198.18.1.1", + Service: &structs.NodeService{ + PeerName: "peer1", + Kind: structs.ServiceKindConnectProxy, + Service: "web-proxy", + Address: "199.0.0.1", + Port: 12345, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "peer-web", + }, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + }, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + } } -} -func getTestCasesParseLocality() []testCaseParseLocality { - testCases := []testCaseParseLocality{ - { - name: "test [..dc]", - labels: []string{"test-dc", "dc"}, - enterpriseDNSConfig: enterpriseDNSConfig{}, - expectedResult: queryLocality{ - EnterpriseMeta: acl.EnterpriseMeta{}, - datacenter: "test-dc", - }, - expectedOK: true, - }, - { - name: "test [..peer]", - labels: []string{"test-peer", "peer"}, - enterpriseDNSConfig: enterpriseDNSConfig{}, - expectedResult: queryLocality{ - EnterpriseMeta: acl.EnterpriseMeta{}, - peer: "test-peer", - }, - expectedOK: true, - }, - { - name: "test 1 label", - labels: []string{"test-peer"}, - enterpriseDNSConfig: enterpriseDNSConfig{}, - expectedResult: queryLocality{ - EnterpriseMeta: acl.EnterpriseMeta{}, - peerOrDatacenter: "test-peer", - }, - expectedOK: true, - }, - { - name: "test 0 labels", - labels: []string{}, - enterpriseDNSConfig: enterpriseDNSConfig{}, - expectedResult: queryLocality{}, - expectedOK: true, - }, - { - name: "test 3 labels returns not found", - labels: []string{"test-dc", "dc", "test-blah"}, - enterpriseDNSConfig: enterpriseDNSConfig{}, - expectedResult: queryLocality{}, - 
expectedOK: false, - }, + dnsQuery := func(t *testing.T, question string, typ uint16) *dns.Msg { + m := new(dns.Msg) + m.SetQuestion(question, typ) + + c := new(dns.Client) + reply, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, reply.Answer, 1, "zero valid records found for %q", question) + return reply + } + + assertARec := func(t *testing.T, rec dns.RR, expectName, expectIP string) { + aRec, ok := rec.(*dns.A) + require.True(t, ok, "Extra is not an A record: %T", rec) + require.Equal(t, expectName, aRec.Hdr.Name) + require.Equal(t, expectIP, aRec.A.String()) + } + + assertSRVRec := func(t *testing.T, rec dns.RR, expectName string, expectPort uint16) { + srvRec, ok := rec.(*dns.SRV) + require.True(t, ok, "Answer is not a SRV record: %T", rec) + require.Equal(t, expectName, srvRec.Target) + require.Equal(t, expectPort, srvRec.Port) } - return testCases + + t.Run("srv-with-addr-reply", func(t *testing.T) { + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", makeReq(), &struct{}{})) + q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 1) + + addr := "c7000001.addr.consul." + assertSRVRec(t, q.Answer[0], addr, 12345) + assertARec(t, q.Extra[0], addr, "199.0.0.1") + + // Query the addr to make sure it's also valid. + q = dnsQuery(t, addr, dns.TypeA) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 0) + assertARec(t, q.Answer[0], addr, "199.0.0.1") + }) + + t.Run("srv-with-node-reply", func(t *testing.T) { + req := makeReq() + // Clear service address to trigger node response + req.Service.Address = "" + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", req, &struct{}{})) + q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 1) + + nodeName := "peernode1.node.peer1.peer.consul." + assertSRVRec(t, q.Answer[0], nodeName, 12345) + assertARec(t, q.Extra[0], nodeName, "198.18.1.1") + + // Query the node to make sure it's also valid. + q = dnsQuery(t, nodeName, dns.TypeA) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 0) + assertARec(t, q.Answer[0], nodeName, "198.18.1.1") + }) + + t.Run("srv-with-fqdn-reply", func(t *testing.T) { + req := makeReq() + // Set non-ip address to trigger external response + req.Address = "localhost" + req.Service.Address = "" + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", req, &struct{}{})) + q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeSRV) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 0) + assertSRVRec(t, q.Answer[0], "localhost.", 12345) + }) + + t.Run("a-reply", func(t *testing.T) { + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", makeReq(), &struct{}{})) + q := dnsQuery(t, "web-proxy.service.peer1.peer.consul.", dns.TypeA) + require.Len(t, q.Answer, 1) + require.Len(t, q.Extra, 0) + assertARec(t, q.Answer[0], "web-proxy.service.peer1.peer.consul.", "199.0.0.1") + }) } diff --git a/agent/dns_node_lookup_test.go b/agent/dns_node_lookup_test.go deleted file mode 100644 index 8dbb3d407e660..0000000000000 --- a/agent/dns_node_lookup_test.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "context" - "testing" - - "github.com/miekg/dns" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/testrpc" -) - -func TestDNS_NodeLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - TaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - NodeMeta: map[string]string{ - "key": "value", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("foo.node.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Answer, 2) - require.Len(t, in.Extra, 0) - - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok, "First answer is not an A record") - require.Equal(t, "127.0.0.1", aRec.A.String()) - require.Equal(t, uint32(0), aRec.Hdr.Ttl) - - txt, ok := in.Answer[1].(*dns.TXT) - require.True(t, ok, "Second answer is not a TXT record") - require.Len(t, txt.Txt, 1) - require.Equal(t, "key=value", txt.Txt[0]) - - // Re-do the query, but only for an A RR - - m = new(dns.Msg) - m.SetQuestion("foo.node.consul.", dns.TypeA) - - c = new(dns.Client) - in, _, err = c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Answer, 1) - require.Len(t, in.Extra, 1) - - aRec, ok = in.Answer[0].(*dns.A) - require.True(t, ok, "Answer is not an A record") - require.Equal(t, "127.0.0.1", aRec.A.String()) - require.Equal(t, uint32(0), aRec.Hdr.Ttl) - - txt, ok = in.Extra[0].(*dns.TXT) - require.True(t, ok, "Extra record is not a TXT record") - require.Len(t, txt.Txt, 1) - require.Equal(t, "key=value", txt.Txt[0]) - - // Re-do the query, but specify the DC - m = new(dns.Msg) - m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) - - c = new(dns.Client) - in, _, err = c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Answer, 2) - require.Len(t, in.Extra, 0) - - aRec, ok = in.Answer[0].(*dns.A) - require.True(t, ok, "First answer is not an A record") - require.Equal(t, "127.0.0.1", aRec.A.String()) - require.Equal(t, uint32(0), aRec.Hdr.Ttl) - - _, ok = in.Answer[1].(*dns.TXT) - require.True(t, ok, "Second answer is not a TXT record") - - // lookup a non-existing node, we should receive a SOA - m = new(dns.Msg) - m.SetQuestion("nofoo.node.dc1.consul.", dns.TypeANY) - - c = new(dns.Client) - in, _, err = c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Ns, 1) - soaRec, ok := in.Ns[0].(*dns.SOA) - require.True(t, ok, "NS RR is not a SOA record") - require.Equal(t, uint32(0), soaRec.Hdr.Ttl) - }) - } -} - -func TestDNS_NodeLookup_CaseInsensitive(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "Foo", - Address: "127.0.0.1", - } - - var out struct{} - if err := 
a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("fOO.node.dc1.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("empty lookup: %#v", in) - } - }) - } -} - -// V2 DNS does not support node names with a period. This will be deprecated. -func TestDNS_NodeLookup_PeriodName(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(false) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node with period in name - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo.bar", - Address: "127.0.0.1", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("foo.bar.node.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aRec, ok := in.Answer[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - }) - } -} - -func TestDNS_NodeLookup_AAAA(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "::4242:4242", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("bar.node.consul.", dns.TypeAAAA) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aRec, ok := in.Answer[0].(*dns.AAAA) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.AAAA.String() != "::4242:4242" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - }) - } -} - -func TestDNS_NodeLookup_CNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{ - dnsCNAME("www.google.com", "google.com"), - dnsA("google.com", "1.2.3.4"), - dnsTXT("google.com", []string{"my_txt_value"}), - }, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "www.google.com", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("google.node.consul.", dns.TypeANY) - m.SetEdns0(8192, true) - - c := new(dns.Client) - in, _, err := 
c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAnswer := []dns.RR{ - &dns.CNAME{ - Hdr: dns.RR_Header{Name: "google.node.consul.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x10}, - Target: "www.google.com.", - }, - &dns.CNAME{ - Hdr: dns.RR_Header{Name: "www.google.com.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Rdlength: 0x2}, - Target: "google.com.", - }, - &dns.A{ - Hdr: dns.RR_Header{Name: "google.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x1, 0x2, 0x3, 0x4}, // 1.2.3.4 - }, - &dns.TXT{ - Hdr: dns.RR_Header{Name: "google.com.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xd}, - Txt: []string{"my_txt_value"}, - }, - } - require.Equal(t, wantAnswer, in.Answer) - }) - } -} - -func TestDNS_NodeLookup_TXT(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "rfc1035-00": "value0", - "key0": "value1", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("google.node.consul.", dns.TypeTXT) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should have the 1 TXT record reply - if len(in.Answer) != 2 { - t.Fatalf("Bad: %#v", in) - } - - txtRec, ok := in.Answer[0].(*dns.TXT) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if len(txtRec.Txt) != 1 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - }) - } -} - -func TestDNS_NodeLookup_TXT_DontSuppress(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false } `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "rfc1035-00": "value0", - "key0": "value1", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("google.node.consul.", dns.TypeTXT) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should have the 1 TXT record reply - if len(in.Answer) != 2 { - t.Fatalf("Bad: %#v", in) - } - - txtRec, ok := in.Answer[0].(*dns.TXT) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if len(txtRec.Txt) != 1 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - }) - } -} - -func TestDNS_NodeLookup_ANY(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - 
testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "key": "value", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("bar.node.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAnswer := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 - }, - &dns.TXT{ - Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, - Txt: []string{"key=value"}, - }, - } - require.Equal(t, wantAnswer, in.Answer) - }) - } -} - -func TestDNS_NodeLookup_ANY_DontSuppressTXT(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false } `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "key": "value", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("bar.node.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAnswer := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 - }, - &dns.TXT{ - Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, - Txt: []string{"key=value"}, - }, - } - require.Equal(t, wantAnswer, in.Answer) - }) - } -} - -func TestDNS_NodeLookup_A_SuppressTXT(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false } `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "key": "value", - }, - } - - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - - m := new(dns.Msg) - m.SetQuestion("bar.node.consul.", dns.TypeA) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - wantAnswer := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 - }, - } - require.Equal(t, wantAnswer, in.Answer) - - // ensure TXT RR suppression - require.Len(t, in.Extra, 0) - }) - } -} - -func TestDNS_NodeLookup_TTL(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{ - dnsCNAME("www.google.com", "google.com"), - dnsA("google.com", 
"1.2.3.4"), - }, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - dns_config { - node_ttl = "10s" - allow_stale = true - max_stale = "1s" - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("foo.node.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aRec, ok := in.Answer[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.Hdr.Ttl != 10 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - // Register node with IPv6 - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "::4242:4242", - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Check an IPv6 record - m = new(dns.Msg) - m.SetQuestion("bar.node.consul.", dns.TypeANY) - - in, _, err = c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aaaaRec, ok := in.Answer[0].(*dns.AAAA) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aaaaRec.AAAA.String() != "::4242:4242" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aaaaRec.Hdr.Ttl != 10 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - // Register node with CNAME - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "www.google.com", - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m = new(dns.Msg) - m.SetQuestion("google.node.consul.", dns.TypeANY) - - in, _, err = c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should have the CNAME record + a few A records - if len(in.Answer) < 2 { - t.Fatalf("Bad: %#v", in) - } - - cnRec, ok := in.Answer[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if cnRec.Target != "www.google.com." { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if cnRec.Hdr.Ttl != 10 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - }) - } -} diff --git a/agent/dns_reverse_lookup_test.go b/agent/dns_reverse_lookup_test.go deleted file mode 100644 index 6a7cc49456479..0000000000000 --- a/agent/dns_reverse_lookup_test.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "context" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/testrpc" - "github.com/miekg/dns" - "github.com/stretchr/testify/require" - "testing" -) - -func TestDNS_ReverseLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo2", - Address: "127.0.0.2", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != "foo2.node.dc1.consul." { - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ReverseLookup_CustomDomain(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "custom" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo2", - Address: "127.0.0.2", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != "foo2.node.dc1.custom." { - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ReverseLookup_IPV6(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "::4242:4242", - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("2.4.2.4.2.4.2.4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != "bar.node.dc1.consul." 
{ - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_Compression_ReverseLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node. - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo2", - Address: "127.0.0.2", - } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - - conn, err := dns.Dial("udp", a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Do a manual exchange with compression on (the default). - if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - p := make([]byte, dns.MaxMsgSize) - compressed, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Disable compression and try again. - a.DNSDisableCompression(true) - if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - unc, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } - - // We can't see the compressed status given the DNS API, so we just make - // sure the message is smaller to see if it's respecting the flag. - if compressed == 0 || unc == 0 || compressed >= unc { - t.Fatalf("doesn't look compressed: %d vs. %d", compressed, unc) - } - }) - } -} - -func TestDNS_ServiceReverseLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - Address: "127.0.0.2", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "consul", nil)+"." { - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ServiceReverseLookup_IPV6(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "2001:db8::1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - Address: "2001:db8::ff00:42:8329", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("9.2.3.8.2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "consul", nil)+"." { - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ServiceReverseLookup_CustomDomain(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "custom" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - Address: "127.0.0.2", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "custom", nil)+"." { - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ServiceReverseLookupNodeAddress(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - Address: "127.0.0.1", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("1.0.0.127.in-addr.arpa.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - ptrRec, ok := in.Answer[0].(*dns.PTR) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if ptrRec.Ptr != "foo.node.dc1.consul." 
{ - t.Fatalf("Bad: %#v", ptrRec) - } - }) - } -} - -func TestDNS_ReverseLookup_NotFound(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - // do not configure recursors - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Do not register any nodes - m := new(dns.Msg) - qName := "2.0.0.127.in-addr.arpa." - m.SetQuestion(qName, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Nil(t, in.Answer) - require.Nil(t, in.Extra) - - require.Equal(t, dns.RcodeNameError, in.Rcode) - - question := in.Question[0] - require.Equal(t, qName, question.Name) - require.Equal(t, dns.TypeANY, question.Qtype) - require.Equal(t, uint16(dns.ClassINET), question.Qclass) - - soa, ok := in.Ns[0].(*dns.SOA) - require.True(t, ok) - require.Equal(t, "ns.consul.", soa.Ns) - require.Equal(t, "hostmaster.consul.", soa.Mbox) - }) - } -} diff --git a/agent/dns_service_lookup_test.go b/agent/dns_service_lookup_test.go deleted file mode 100644 index 7905100aa66c6..0000000000000 --- a/agent/dns_service_lookup_test.go +++ /dev/null @@ -1,3893 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "context" - "fmt" - "net" - "sort" - "strings" - "testing" - - "github.com/miekg/dns" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/testrpc" -) - -func TestDNS_ServiceLookupNoMultiCNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "198.18.0.1", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "foo.node.consul", - }, - } - - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Register a second node node with the same service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "198.18.0.2", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "bar.node.consul", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - // expect a CNAME and an A RR - require.Len(t, in.Answer, 2) - require.IsType(t, &dns.CNAME{}, in.Answer[0]) - require.IsType(t, &dns.A{}, in.Answer[1]) - }) - } -} - -func TestDNS_ServiceLookupPreferNoCNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "198.18.0.1", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "198.18.0.1", - }, - } - - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Register a second node node with the same service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "198.18.0.2", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "bar.node.consul", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - // expect an A RR - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.A) - require.Truef(t, ok, "Not an A RR") - - require.Equal(t, "db.service.consul.", aRec.Hdr.Name) - require.Equal(t, "198.18.0.1", aRec.A.String()) - }) - } -} - -func TestDNS_ServiceLookupMultiAddrNoCNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "198.18.0.1", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "198.18.0.1", - }, - } - - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Register a second node node with the same service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "198.18.0.2", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "bar.node.consul", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register a second node node with the same service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "baz", - Address: "198.18.0.3", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - Address: "198.18.0.3", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - // expect two A RRs - require.Len(t, in.Answer, 2) - require.IsType(t, &dns.A{}, in.Answer[0]) - require.Equal(t, "db.service.consul.", in.Answer[0].Header().Name) - isOneOfTheseIPs := func(ip net.IP) bool { - if ip.Equal(net.ParseIP("198.18.0.1")) || ip.Equal(net.ParseIP("198.18.0.3")) { - return true - } - return false - } - require.True(t, isOneOfTheseIPs(in.Answer[0].(*dns.A).A)) - require.IsType(t, &dns.A{}, in.Answer[1]) - require.Equal(t, "db.service.consul.", in.Answer[1].Header().Name) - require.True(t, isOneOfTheseIPs(in.Answer[1].(*dns.A).A)) - }) - } -} - -func TestDNS_ServiceLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - DNS: structs.QueryDNSOptions{ - TTL: "3s", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - - if strings.Contains(question, "query") { - // The query should have the TTL associated with the query registration. 
- if srvRec.Hdr.Ttl != 3 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.Hdr.Ttl != 3 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } else { - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - - } - - // Lookup a non-existing service/query, we should receive an SOA. - questions = []string{ - "nodb.service.consul.", - "nope.query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Ns) != 1 { - t.Fatalf("Bad: %#v", in) - } - - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookupWithInternalServiceAddress(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - node_name = "my.test-node" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - // The service is using the consul DNS name as service address - // which triggers a lookup loop and a subsequent stack overflow - // crash. - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Address: "db.service.consul", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Looking up the service should not trigger a loop - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAnswer := []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{Name: "db.service.consul.", Rrtype: 0x21, Class: 0x1, Rdlength: 0x1b}, - Priority: 0x1, - Weight: 0x1, - Port: 12345, - Target: "foo.node.dc1.consul.", - }, - } - require.Equal(t, wantAnswer, in.Answer, "answer") - wantExtra := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "foo.node.dc1.consul.", Rrtype: 0x1, Class: 0x1, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 - }, - } - require.Equal(t, wantExtra, in.Extra, "extra") - }) - } -} - -func TestDNS_ConnectServiceLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register - { - args := structs.TestRegisterRequestProxy(t) - args.Address = "127.0.0.55" - args.Service.Proxy.DestinationServiceName = "db" - args.Service.Address = "" - args.Service.Port = 12345 - var out struct{} - require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Look up the service - questions := []string{ - "db.connect.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.Nil(t, err) - require.Len(t, in.Answer, 1) - - srvRec, ok := in.Answer[0].(*dns.SRV) - require.True(t, ok) - require.Equal(t, uint16(12345), srvRec.Port) 
- require.Equal(t, "foo.node.dc1.consul.", srvRec.Target) - require.Equal(t, uint32(0), srvRec.Hdr.Ttl) - - cnameRec, ok := in.Extra[0].(*dns.A) - require.True(t, ok) - require.Equal(t, "foo.node.dc1.consul.", cnameRec.Hdr.Name) - require.Equal(t, uint32(0), srvRec.Hdr.Ttl) - require.Equal(t, "127.0.0.55", cnameRec.A.String()) - } - }) - } -} - -func TestDNS_IngressServiceLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register ingress-gateway service - { - args := structs.TestRegisterIngressGateway(t) - var out struct{} - require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Register db service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Address: "", - Port: 80, - }, - } - - var out struct{} - require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - - // Register proxy-defaults with 'http' protocol - { - req := structs.ConfigEntryRequest{ - Op: structs.ConfigEntryUpsert, - Datacenter: "dc1", - Entry: &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: structs.ProxyConfigGlobal, - Config: map[string]interface{}{ - "protocol": "http", - }, - }, - WriteRequest: structs.WriteRequest{Token: "root"}, - } - var out bool - require.Nil(t, a.RPC(context.Background(), "ConfigEntry.Apply", req, &out)) - require.True(t, out) - } - - // Register ingress-gateway config entry - { - args := &structs.IngressGatewayConfigEntry{ - Name: "ingress-gateway", - Kind: structs.IngressGateway, - Listeners: []structs.IngressListener{ - { - Port: 8888, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "db"}, - {Name: "api"}, - }, - }, - }, - } - - req := structs.ConfigEntryRequest{ - Op: structs.ConfigEntryUpsert, - Datacenter: "dc1", - Entry: args, - } - var out bool - require.Nil(t, a.RPC(context.Background(), "ConfigEntry.Apply", req, &out)) - require.True(t, out) - } - - // Look up the service - questions := []string{ - "api.ingress.consul.", - "api.ingress.dc1.consul.", - "db.ingress.consul.", - "db.ingress.dc1.consul.", - } - for _, question := range questions { - t.Run(question, func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.Nil(t, err) - require.Len(t, in.Answer, 1) - - cnameRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, question, cnameRec.Hdr.Name) - require.Equal(t, uint32(0), cnameRec.Hdr.Ttl) - require.Equal(t, "127.0.0.1", cnameRec.A.String()) - }) - } - }) - } -} - -func TestDNS_ExternalServiceLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with an external service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "www.google.com", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service - questions := []string{ - "db.service.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 || len(in.Extra) > 0 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "www.google.com." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - } - }) - } -} - -func TestDNS_ExternalServiceToConsulCNAMELookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "CONSUL." - node_name = "test node" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register the initial node with a service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "web", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "web", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an external service pointing to the 'web' service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "alias", - Address: "web.service.consul", - Service: &structs.NodeService{ - Service: "alias", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly - questions := []string{ - "alias.service.consul.", - "alias.service.CoNsUl.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "web.service.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - if len(in.Extra) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "web.service.consul." 
{ - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_ExternalServiceToConsulCNAMENestedLookup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - node_name = "test-node" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register the initial node with a service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "web", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "web", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an external service pointing to the 'web' service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "alias", - Address: "web.service.consul", - Service: &structs.NodeService{ - Service: "alias", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an external service pointing to the 'alias' service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "alias2", - Address: "alias.service.consul", - Service: &structs.NodeService{ - Service: "alias2", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly - questions := []string{ - "alias2.service.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "alias.service.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if len(in.Extra) != 2 { - t.Fatalf("Bad: %#v", in) - } - - cnameRec, ok := in.Extra[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if cnameRec.Hdr.Name != "alias.service.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if cnameRec.Target != "web.service.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if cnameRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - - aRec, ok := in.Extra[1].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - if aRec.Hdr.Name != "web.service.consul." { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_ServiceAddress_A(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Address: "127.0.0.2", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "7f000002.addr.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "7f000002.addr.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.2" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_AltDomain_ServiceLookup_ServiceAddress_A(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - alt_domain = "test-domain" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Address: "127.0.0.2", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. 
- questions := []struct { - ask string - wantDomain string - }{ - {"db.service.consul.", "consul."}, - {id + ".query.consul.", "consul."}, - {"db.service.test-domain.", "test-domain."}, - {id + ".query.test-domain.", "test-domain."}, - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "7f000002.addr.dc1."+question.wantDomain { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "7f000002.addr.dc1."+question.wantDomain { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.2" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_ServiceAddress_SRV(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{ - dnsCNAME("www.google.com", "google.com"), - dnsA("google.com", "1.2.3.4"), - }, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service whose address isn't an IP. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Address: "www.google.com", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - // Specify prepared query name containing "." to test - // since that is technically supported (though atypical). - var id string - preparedQueryName := "query.name.with.dots" - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: preparedQueryName, - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - preparedQueryName + ".query.consul.", - fmt.Sprintf("_%s._tcp.query.consul.", id), - fmt.Sprintf("_%s._tcp.query.consul.", preparedQueryName), - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "www.google.com." 
{ - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - // Should have google CNAME - cnRec, ok := in.Extra[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if cnRec.Target != "google.com." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - - // Check we recursively resolve - aRec, ok := in.Extra[1].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - if aRec.A.String() != "1.2.3.4" { - t.Fatalf("Bad: %s", aRec.A.String()) - } - } - }) - } -} - -func TestDNS_ServiceLookup_ServiceAddressIPV6(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Address: "2607:20:4005:808::200e", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "2607002040050808000000000000200e.addr.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.AAAA) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "2607002040050808000000000000200e.addr.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.AAAA.String() != "2607:20:4005:808::200e" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_AltDomain_ServiceLookup_ServiceAddressIPV6(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - alt_domain = "test-domain" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. 
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Address: "2607:20:4005:808::200e", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []struct { - ask string - want string - }{ - {"db.service.consul.", "2607002040050808000000000000200e.addr.dc1.consul."}, - {"db.service.test-domain.", "2607002040050808000000000000200e.addr.dc1.test-domain."}, - {id + ".query.consul.", "2607002040050808000000000000200e.addr.dc1.consul."}, - {id + ".query.test-domain.", "2607002040050808000000000000200e.addr.dc1.test-domain."}, - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != question.want { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.AAAA) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != question.want { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.AAAA.String() != "2607:20:4005:808::200e" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_WanTranslation(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a1 := NewTestAgent(t, ` - datacenter = "dc1" - translate_wan_addrs = true - acl_datacenter = "" - `+experimentsHCL) - defer a1.Shutdown() - - a2 := NewTestAgent(t, ` - datacenter = "dc2" - translate_wan_addrs = true - acl_datacenter = "" - `+experimentsHCL) - defer a2.Shutdown() - - // Join WAN cluster - addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN) - _, err := a2.JoinWAN([]string{addr}) - require.NoError(t, err) - retry.Run(t, func(r *retry.R) { - require.Len(r, a1.WANMembers(), 2) - require.Len(r, a2.WANMembers(), 2) - }) - - // Register an equivalent prepared query. 
- var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc2", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - require.NoError(t, a2.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) - } - - type testCase struct { - nodeTaggedAddresses map[string]string - serviceAddress string - serviceTaggedAddresses map[string]structs.ServiceAddress - - dnsAddr string - - expectedPort uint16 - expectedAddress string - expectedARRName string - } - - cases := map[string]testCase{ - "node-addr-from-dc1": { - dnsAddr: a1.config.DNSAddrs[0].String(), - expectedPort: 8080, - expectedAddress: "127.0.0.1", - expectedARRName: "foo.node.dc2.consul.", - }, - "node-wan-from-dc1": { - dnsAddr: a1.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - expectedPort: 8080, - expectedAddress: "127.0.0.2", - expectedARRName: "7f000002.addr.dc2.consul.", - }, - "service-addr-from-dc1": { - dnsAddr: a1.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - serviceAddress: "10.0.1.1", - expectedPort: 8080, - expectedAddress: "10.0.1.1", - expectedARRName: "0a000101.addr.dc2.consul.", - }, - "service-wan-from-dc1": { - dnsAddr: a1.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - serviceAddress: "10.0.1.1", - serviceTaggedAddresses: map[string]structs.ServiceAddress{ - "wan": { - Address: "198.18.0.1", - Port: 80, - }, - }, - expectedPort: 80, - expectedAddress: "198.18.0.1", - expectedARRName: "c6120001.addr.dc2.consul.", - }, - "node-addr-from-dc2": { - dnsAddr: a2.config.DNSAddrs[0].String(), - expectedPort: 8080, - expectedAddress: "127.0.0.1", - expectedARRName: "foo.node.dc2.consul.", - }, - "node-wan-from-dc2": { - dnsAddr: a2.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - expectedPort: 8080, - expectedAddress: "127.0.0.1", - expectedARRName: "foo.node.dc2.consul.", - }, - "service-addr-from-dc2": { - dnsAddr: a2.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - serviceAddress: "10.0.1.1", - expectedPort: 8080, - expectedAddress: "10.0.1.1", - expectedARRName: "0a000101.addr.dc2.consul.", - }, - "service-wan-from-dc2": { - dnsAddr: a2.config.DNSAddrs[0].String(), - nodeTaggedAddresses: map[string]string{ - "wan": "127.0.0.2", - }, - serviceAddress: "10.0.1.1", - serviceTaggedAddresses: map[string]structs.ServiceAddress{ - "wan": { - Address: "198.18.0.1", - Port: 80, - }, - }, - expectedPort: 8080, - expectedAddress: "10.0.1.1", - expectedARRName: "0a000101.addr.dc2.consul.", - }, - } - - for name, tc := range cases { - name := name - tc := tc - t.Run(name, func(t *testing.T) { - // Register a remote node with a service. This is in a retry since we - // need the datacenter to have a route which takes a little more time - // beyond the join, and we don't have direct access to the router here. 
- retry.Run(t, func(r *retry.R) { - args := &structs.RegisterRequest{ - Datacenter: "dc2", - Node: "foo", - Address: "127.0.0.1", - TaggedAddresses: tc.nodeTaggedAddresses, - Service: &structs.NodeService{ - Service: "db", - Address: tc.serviceAddress, - Port: 8080, - TaggedAddresses: tc.serviceTaggedAddresses, - }, - } - - var out struct{} - require.NoError(r, a2.RPC(context.Background(), "Catalog.Register", args, &out)) - }) - - // Look up the SRV record via service and prepared query. - questions := []string{ - "db.service.dc2.consul.", - id + ".query.dc2.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - - addr := tc.dnsAddr - in, _, err := c.Exchange(m, addr) - require.NoError(t, err) - require.Len(t, in.Answer, 1) - srvRec, ok := in.Answer[0].(*dns.SRV) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, tc.expectedPort, srvRec.Port) - - aRec, ok := in.Extra[0].(*dns.A) - require.True(t, ok, "Bad: %#v", in.Extra[0]) - require.Equal(t, tc.expectedARRName, aRec.Hdr.Name) - require.Equal(t, tc.expectedAddress, aRec.A.String()) - } - - // Also check the A record directly - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) - - c := new(dns.Client) - addr := tc.dnsAddr - in, _, err := c.Exchange(m, addr) - require.NoError(t, err) - require.Len(t, in.Answer, 1) - - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, question, aRec.Hdr.Name) - require.Equal(t, tc.expectedAddress, aRec.A.String()) - } - }) - } - }) - } -} - -func TestDNS_ServiceLookup_CaseInsensitive(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - tests := []struct { - name string - config string - }{ - // UDP + EDNS - {"normal", ""}, - {"cache", `dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"}`}, - {"cache-with-streaming", ` - rpc{ - enable_streaming=true - } - use_streaming_backend=true - dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"} - `}, - } - for _, tst := range tests { - t.Run(fmt.Sprintf("A lookup %v", tst.name), func(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, fmt.Sprintf("%s %s", tst.config, experimentsHCL)) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "Db", - Tags: []string{"Primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query, as well as a name. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "somequery", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Try some variations to make sure case doesn't matter. 
- questions := []string{ - "primary.Db.service.consul.", - "primary.db.service.consul.", - "pRIMARY.dB.service.consul.", - "PRIMARY.dB.service.consul.", - "db.service.consul.", - "DB.service.consul.", - "Db.service.consul.", - "somequery.query.consul.", - "SomeQuery.query.consul.", - "SOMEQUERY.query.consul.", - } - - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - retry.Run(t, func(r *retry.R) { - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - r.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - r.Fatalf("question %v, empty lookup: %#v", question, in) - } - }) - } - }) - } - }) - } -} - -// V2 DNS: we have deprecated support for service tags w/ periods -func TestDNS_ServiceLookup_TagPeriod(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(false) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"v1.primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m1 := new(dns.Msg) - m1.SetQuestion("v1.primary2.db.service.consul.", dns.TypeSRV) - - c1 := new(dns.Client) - in, _, err := c1.Exchange(m1, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 0 { - t.Fatalf("Bad: %#v", in) - } - - m := new(dns.Msg) - m.SetQuestion("v1.primary.db.service.consul.", dns.TypeSRV) - - c := new(dns.Client) - in, _, err = c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - }) - } -} - -func TestDNS_ServiceLookup_PreparedQueryNamePeriod(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register a prepared query with a period in the name. 
- { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "some.query.we.like", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - - var id string - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - m := new(dns.Msg) - m.SetQuestion("some.query.we.like.query.consul.", dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - }) - } -} - -func TestDNS_ServiceLookup_Dedup(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a single node with multiple instances of a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db2", - Service: "db", - Tags: []string{"replica"}, - Port: 12345, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db3", - Service: "db", - Tags: []string{"replica"}, - Port: 12346, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query, make sure only - // one IP is returned. 
- questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - aRec, ok := in.Answer[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a single node with multiple instances of a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db2", - Service: "db", - Tags: []string{"replica"}, - Port: 12345, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db3", - Service: "db", - Tags: []string{"replica"}, - Port: 12346, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query, make sure only - // one IP is returned and two unique ports are returned. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 2 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 && srvRec.Port != 12346 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - - srvRec, ok = in.Answer[1].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[1]) - } - if srvRec.Port != 12346 && srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Port == in.Answer[0].(*dns.SRV).Port { - t.Fatalf("should be a different port") - } - if srvRec.Target != "foo.node.dc1.consul." 
{ - t.Fatalf("Bad: %#v", srvRec) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_FilterCritical(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register nodes with health checks in various states. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "serf", - Name: "serf", - Status: api.HealthCritical, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args2 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "serf", - Name: "serf", - Status: api.HealthCritical, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args3 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthCritical, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args4 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "baz", - Address: "127.0.0.3", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args4, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args5 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "quux", - Address: "127.0.0.4", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthWarning, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args5, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. 
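// The checks below depend on Consul's default health filtering: an instance is
// dropped from DNS answers when any of its checks, node-level or service-level,
// is critical, while instances in the warning state are still returned
// (only_passing, exercised in a later test, tightens this further). A sketch of
// collecting the advertised addresses into a set, mirroring the assertion loop
// that follows; the agent address and package name are placeholders.
package dnssketch

import (
	"github.com/miekg/dns"
)

// advertisedAddrs returns the set of A-record addresses the agent currently
// serves for a question such as "db.service.consul.".
func advertisedAddrs(agentDNS, question string) (map[string]struct{}, error) {
	m := new(dns.Msg)
	m.SetQuestion(question, dns.TypeA)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, agentDNS)
	if err != nil {
		return nil, err
	}

	addrs := make(map[string]struct{})
	for _, rr := range in.Answer {
		if a, ok := rr.(*dns.A); ok {
			addrs[a.A.String()] = struct{}{}
		}
	}
	return addrs, nil
}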
- questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Only 4 and 5 are not failing, so we should get 2 answers - if len(in.Answer) != 2 { - t.Fatalf("Bad: %#v", in) - } - - ips := make(map[string]bool) - for _, resp := range in.Answer { - aRec := resp.(*dns.A) - ips[aRec.A.String()] = true - } - - if !ips["127.0.0.3"] { - t.Fatalf("Bad: %#v should contain 127.0.0.3 (state healthy)", in) - } - if !ips["127.0.0.4"] { - t.Fatalf("Bad: %#v should contain 127.0.0.4 (state warning)", in) - } - } - }) - } -} - -func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register nodes with all health checks in a critical state. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "serf", - Name: "serf", - Status: api.HealthCritical, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args2 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "serf", - Name: "serf", - Status: api.HealthCritical, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args3 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthCritical, - }, - } - if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. 
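// When every instance of a service is critical, the agent answers with zero
// records and an NXDOMAIN response code, which is exactly what the loop below
// asserts. A client can therefore distinguish "no healthy instances or unknown
// name" from a transport failure by inspecting the Rcode; a short sketch, with
// the agent address and package name as placeholders.
package dnssketch

import (
	"fmt"

	"github.com/miekg/dns"
)

// resolvable reports whether a question currently resolves to at least one
// healthy instance.
func resolvable(agentDNS, question string) (bool, error) {
	m := new(dns.Msg)
	m.SetQuestion(question, dns.TypeA)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, agentDNS)
	if err != nil {
		return false, fmt.Errorf("exchange: %w", err)
	}
	if in.Rcode == dns.RcodeNameError {
		// NXDOMAIN: the name has no healthy instances (or does not exist).
		return false, nil
	}
	return len(in.Answer) > 0, nil
}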
- questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // All 3 are failing, so we should get 0 answers and an NXDOMAIN response - if len(in.Answer) != 0 { - t.Fatalf("Bad: %#v", in) - } - - if in.Rcode != dns.RcodeNameError { - t.Fatalf("Bad: %#v", in) - } - } - }) - } -} - -func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - dns_config { - only_passing = true - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register nodes with health checks in various states. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthPassing, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args2 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthWarning, - }, - } - - if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil { - t.Fatalf("err: %v", err) - } - - args3 := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "baz", - Address: "127.0.0.3", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - Check: &structs.HealthCheck{ - CheckID: "db", - Name: "db", - ServiceID: "db", - Status: api.HealthCritical, - }, - } - - if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - OnlyPassing: true, - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Only 1 is passing, so we should only get 1 answer - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - resp := in.Answer[0] - aRec := resp.(*dns.A) - - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - } - - newCfg := *a.Config - newCfg.DNSOnlyPassing = false - err := a.reloadConfigInternal(&newCfg) - require.NoError(t, err) - - // only_passing is now false. 
we should now get two nodes - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - require.Equal(t, 2, len(in.Answer)) - ips := []string{in.Answer[0].(*dns.A).A.String(), in.Answer[1].(*dns.A).A.String()} - sort.Strings(ips) - require.Equal(t, []string{"127.0.0.1", "127.0.0.2"}, ips) - }) - } -} - -func TestDNS_ServiceLookup_Randomize(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a large number of nodes. - for i := 0; i < generateNumNodes; i++ { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("foo%d", i), - Address: fmt.Sprintf("127.0.0.%d", i+1), - Service: &structs.NodeService{ - Service: "web", - Port: 8000, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "web", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. Ensure the - // response is randomized each time. - questions := []string{ - "web.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - uniques := map[string]struct{}{} - for i := 0; i < 10; i++ { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := &dns.Client{Net: "udp"} - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Response length should be truncated and we should get - // an A record for each response. - if len(in.Answer) != defaultNumUDPResponses { - t.Fatalf("Bad: %#v", len(in.Answer)) - } - - // Collect all the names. - var names []string - for _, rec := range in.Answer { - switch v := rec.(type) { - case *dns.SRV: - names = append(names, v.Target) - case *dns.A: - names = append(names, v.A.String()) - } - } - nameS := strings.Join(names, "|") - - // Tally the results. - uniques[nameS] = struct{}{} - } - - // Give some wiggle room. Since the responses are randomized and - // there is a finite number of combinations, requiring 0 - // duplicates every test run eventually gives us failures. - if len(uniques) < 2 { - t.Fatalf("unique response ratio too low: %d/10\n%v", len(uniques), uniques) - } - } - }) - } -} - -func TestDNS_ServiceLookup_Truncate(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - dns_config { - enable_truncate = true - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a large number of nodes. 
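// With enable_truncate set (as in the agent config above), responses that do
// not fit in the UDP payload come back with the TC bit set, which is what this
// test asserts further down. The usual client reaction is to retry the same
// question over TCP, where the full answer set is returned; a sketch of that
// fallback, with the package name as a placeholder.
package dnssketch

import (
	"github.com/miekg/dns"
)

// exchangeWithTCPFallback sends the question over UDP and repeats it over TCP
// if the UDP response was truncated.
func exchangeWithTCPFallback(agentDNS string, m *dns.Msg) (*dns.Msg, error) {
	udp := &dns.Client{Net: "udp"}
	in, _, err := udp.Exchange(m, agentDNS)
	if err != nil {
		return nil, err
	}
	if !in.Truncated {
		return in, nil
	}

	// Truncated: ask again over TCP to obtain the complete response.
	tcp := &dns.Client{Net: "tcp"}
	in, _, err = tcp.Exchange(m, agentDNS)
	return in, err
}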
- for i := 0; i < generateNumNodes; i++ { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("foo%d", i), - Address: fmt.Sprintf("127.0.0.%d", i+1), - Service: &structs.NodeService{ - Service: "web", - Port: 8000, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "web", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. Ensure the - // response is truncated each time. - questions := []string{ - "web.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Check for the truncate bit - if !in.Truncated { - t.Fatalf("should have truncate bit") - } - } - }) - } -} - -func TestDNS_ServiceLookup_LargeResponses(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - dns_config { - enable_truncate = true - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - longServiceName := "this-is-a-very-very-very-very-very-long-name-for-a-service" - - // Register a lot of nodes. - for i := 0; i < 4; i++ { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("foo%d", i), - Address: fmt.Sprintf("127.0.0.%d", i+1), - Service: &structs.NodeService{ - Service: longServiceName, - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: longServiceName, - Service: structs.ServiceQuery{ - Service: longServiceName, - Tags: []string{"primary"}, - }, - }, - } - var id string - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "_" + longServiceName + "._primary.service.consul.", - longServiceName + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if !in.Truncated { - t.Fatalf("should have truncate bit") - } - - // Make sure the response size is RFC 1035-compliant for UDP messages - if in.Len() > 512 { - t.Fatalf("Bad: %d", in.Len()) - } - - // We should only have two answers now - if len(in.Answer) != 2 { - t.Fatalf("Bad: %d", len(in.Answer)) - } - - // Make sure the ADDITIONAL section matches the ANSWER section. - if len(in.Answer) != len(in.Extra) { - t.Fatalf("Bad: %d vs. 
%d", len(in.Answer), len(in.Extra)) - } - for i := 0; i < len(in.Answer); i++ { - srv, ok := in.Answer[i].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[i]) - } - - a, ok := in.Extra[i].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[i]) - } - - if srv.Target != a.Hdr.Name { - t.Fatalf("Bad: %#v %#v", srv, a) - } - } - - // Check for the truncate bit - if !in.Truncated { - t.Fatalf("should have truncate bit") - } - } - }) - } -} - -func testDNSServiceLookupResponseLimits(t *testing.T, answerLimit int, qType uint16, - expectedService, expectedQuery, expectedQueryID int, additionalHCL string) (bool, error) { - a := NewTestAgent(t, ` - node_name = "test-node" - dns_config { - udp_answer_limit = `+fmt.Sprintf("%d", answerLimit)+` - } - `+additionalHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - - choices := perfectlyRandomChoices(generateNumNodes, pctNodesWithIPv6) - for i := 0; i < generateNumNodes; i++ { - nodeAddress := fmt.Sprintf("127.0.0.%d", i+1) - if choices[i] { - nodeAddress = fmt.Sprintf("fe80::%d", i+1) - } - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("foo%d", i), - Address: nodeAddress, - Service: &structs.NodeService{ - Service: "api-tier", - Port: 8080, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - return false, fmt.Errorf("err: %v", err) - } - } - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "api-tier", - Service: structs.ServiceQuery{ - Service: "api-tier", - }, - }, - } - - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - return false, fmt.Errorf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. 
- questions := []string{ - "api-tier.service.consul.", - "api-tier.query.consul.", - id + ".query.consul.", - } - for idx, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, qType) - - c := &dns.Client{Net: "udp"} - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - return false, fmt.Errorf("err: %v", err) - } - - switch idx { - case 0: - if (expectedService > 0 && len(in.Answer) != expectedService) || - (expectedService < -1 && len(in.Answer) < lib.AbsInt(expectedService)) { - return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) - } - case 1: - if (expectedQuery > 0 && len(in.Answer) != expectedQuery) || - (expectedQuery < -1 && len(in.Answer) < lib.AbsInt(expectedQuery)) { - return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) - } - case 2: - if (expectedQueryID > 0 && len(in.Answer) != expectedQueryID) || - (expectedQueryID < -1 && len(in.Answer) < lib.AbsInt(expectedQueryID)) { - return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) - } - default: - panic("abort") - } - } - - return true, nil -} - -func checkDNSService( - t *testing.T, - generateNumNodes int, - aRecordLimit int, - qType uint16, - expectedResultsCount int, - udpSize uint16, -) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - node_name = "test-node" - dns_config { - a_record_limit = `+fmt.Sprintf("%d", aRecordLimit)+` - udp_answer_limit = `+fmt.Sprintf("%d", aRecordLimit)+` - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - - choices := perfectlyRandomChoices(generateNumNodes, pctNodesWithIPv6) - for i := 0; i < generateNumNodes; i++ { - nodeAddress := fmt.Sprintf("127.0.0.%d", i+1) - if choices[i] { - nodeAddress = fmt.Sprintf("fe80::%d", i+1) - } - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("foo%d", i), - Address: nodeAddress, - Service: &structs.NodeService{ - Service: "api-tier", - Port: 8080, - }, - } - - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "api-tier", - Service: structs.ServiceQuery{ - Service: "api-tier", - }, - }, - } - - require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) - } - - // Look up the service directly and via prepared query. 
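// The question list built below exercises the three naming forms that should
// all select the same instances: the service under .service.consul, the
// prepared query by its name under .query.consul, and the prepared query by
// the ID returned from PreparedQuery.Apply. A sketch of resolving all three
// and reporting their answer counts, which a caller would expect to agree;
// agent address, names, and package name are placeholders.
package dnssketch

import (
	"fmt"

	"github.com/miekg/dns"
)

// answerCounts resolves a service through its service name, its prepared query
// name, and the prepared query ID, and reports how many answers each form
// returns.
func answerCounts(agentDNS, service, queryName, queryID string, qtype uint16) (map[string]int, error) {
	questions := []string{
		service + ".service.consul.",
		queryName + ".query.consul.",
		queryID + ".query.consul.",
	}

	counts := make(map[string]int, len(questions))
	c := &dns.Client{Net: "udp"}
	for _, q := range questions {
		m := new(dns.Msg)
		m.SetQuestion(q, qtype)
		in, _, err := c.Exchange(m, agentDNS)
		if err != nil {
			return nil, fmt.Errorf("%s: %w", q, err)
		}
		counts[q] = len(in.Answer)
	}
	return counts, nil
}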
- questions := []string{ - "api-tier.service.consul.", - "api-tier.query.consul.", - id + ".query.consul.", - } - for _, question := range questions { - question := question - t.Run("question: "+question, func(t *testing.T) { - - m := new(dns.Msg) - - m.SetQuestion(question, qType) - protocol := "tcp" - if udpSize > 0 { - protocol = "udp" - } - if udpSize > 512 { - m.SetEdns0(udpSize, true) - } - c := &dns.Client{Net: protocol, UDPSize: 8192} - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - - t.Logf("DNS Response for %+v - %+v", m, in) - - require.Equal(t, expectedResultsCount, len(in.Answer), - "%d/%d answers received for type %v for %s (%s)", len(in.Answer), expectedResultsCount, qType, question, protocol) - }) - } - }) - } -} - -func TestDNS_ServiceLookup_ARecordLimits(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - tests := []struct { - name string - aRecordLimit int - expectedAResults int - expectedAAAAResults int - expectedANYResults int - expectedSRVResults int - numNodesTotal int - udpSize uint16 - _unused_udpAnswerLimit int // NOTE: this field is not used - }{ - // UDP + EDNS - {"udp-edns-1", 1, 1, 1, 1, 30, 30, 8192, 3}, - {"udp-edns-2", 2, 2, 2, 2, 30, 30, 8192, 3}, - {"udp-edns-3", 3, 3, 3, 3, 30, 30, 8192, 3}, - {"udp-edns-4", 4, 4, 4, 4, 30, 30, 8192, 3}, - {"udp-edns-5", 5, 5, 5, 5, 30, 30, 8192, 3}, - {"udp-edns-6", 6, 6, 6, 6, 30, 30, 8192, 3}, - {"udp-edns-max", 6, 2, 1, 3, 3, 3, 8192, 3}, - // All UDP without EDNS have a limit of 2 answers due to udpAnswerLimit - // Even SRV records are limit to 2 records - {"udp-limit-1", 1, 1, 0, 1, 1, 1, 512, 2}, - {"udp-limit-2", 2, 1, 1, 2, 2, 2, 512, 2}, - // AAAA results limited by size of payload - {"udp-limit-3", 3, 1, 1, 2, 2, 2, 512, 2}, - {"udp-limit-4", 4, 1, 1, 2, 2, 2, 512, 2}, - {"udp-limit-5", 5, 1, 1, 2, 2, 2, 512, 2}, - {"udp-limit-6", 6, 1, 1, 2, 2, 2, 512, 2}, - {"udp-limit-max", 6, 1, 1, 2, 2, 2, 512, 2}, - // All UDP without EDNS and no udpAnswerLimit - // Size of records is limited by UDP payload - {"udp-1", 1, 1, 0, 1, 1, 1, 512, 0}, - {"udp-2", 2, 1, 1, 2, 2, 2, 512, 0}, - {"udp-3", 3, 1, 1, 2, 2, 2, 512, 0}, - {"udp-4", 4, 1, 1, 2, 2, 2, 512, 0}, - {"udp-5", 5, 1, 1, 2, 2, 2, 512, 0}, - {"udp-6", 6, 1, 1, 2, 2, 2, 512, 0}, - // Only 3 A and 3 SRV records on 512 bytes - {"udp-max", 6, 1, 1, 2, 2, 2, 512, 0}, - - {"tcp-1", 1, 1, 1, 1, 30, 30, 0, 0}, - {"tcp-2", 2, 2, 2, 2, 30, 30, 0, 0}, - {"tcp-3", 3, 3, 3, 3, 30, 30, 0, 0}, - {"tcp-4", 4, 4, 4, 4, 30, 30, 0, 0}, - {"tcp-5", 5, 5, 5, 5, 30, 30, 0, 0}, - {"tcp-6", 6, 6, 6, 6, 30, 30, 0, 0}, - {"tcp-max", 6, 1, 1, 2, 2, 2, 0, 0}, - } - for _, test := range tests { - test := test // capture loop var - - t.Run(test.name, func(t *testing.T) { - - // All those queries should have at max queriesLimited elements - - t.Run("A", func(t *testing.T) { - checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeA, test.expectedAResults, test.udpSize) - }) - - t.Run("AAAA", func(t *testing.T) { - checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeAAAA, test.expectedAAAAResults, test.udpSize) - }) - - t.Run("ANY", func(t *testing.T) { - checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeANY, test.expectedANYResults, test.udpSize) - }) - - // No limits but the size of records for SRV records, since not subject to randomization issues - t.Run("SRV", func(t *testing.T) { - checkDNSService(t, test.expectedSRVResults, test.aRecordLimit, dns.TypeSRV, test.numNodesTotal, test.udpSize) - 
}) - }) - } -} - -func TestDNS_ServiceLookup_AnswerLimits(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - // Build a matrix of config parameters (udpAnswerLimit), and the - // length of the response per query type and question. Negative - // values imply the test must return at least the abs(value) number - // of records in the answer section. This is required because, for - // example, on OS-X and Linux, the number of answers returned in a - // 512B response is different even though both platforms are x86_64 - // and using the same version of Go. - // - // TODO(sean@): Why is it not identical everywhere when using the - // same compiler? - tests := []struct { - name string - udpAnswerLimit int - expectedAService int - expectedAQuery int - expectedAQueryID int - expectedAAAAService int - expectedAAAAQuery int - expectedAAAAQueryID int - expectedANYService int - expectedANYQuery int - expectedANYQueryID int - }{ - {"0", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {"1", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, - {"2", 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, - {"3", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, - {"4", 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}, - {"5", 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, - {"6", 6, 6, 6, 6, 6, 6, 5, 6, 6, -5}, - {"7", 7, 7, 7, 6, 7, 7, 5, 7, 7, -5}, - {"8", 8, 8, 8, 6, 8, 8, 5, 8, 8, -5}, - {"9", 9, 8, 8, 6, 8, 8, 5, 8, 8, -5}, - {"20", 20, 8, 8, 6, 8, 8, 5, 8, -5, -5}, - {"30", 30, 8, 8, 6, 8, 8, 5, 8, -5, -5}, - } - for _, test := range tests { - test := test // capture loop var - t.Run(fmt.Sprintf("A lookup %v", test), func(t *testing.T) { - ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeA, test.expectedAService, test.expectedAQuery, test.expectedAQueryID, experimentsHCL) - if !ok { - t.Fatalf("Expected service A lookup %s to pass: %v", test.name, err) - } - }) - - t.Run(fmt.Sprintf("AAAA lookup %v", test), func(t *testing.T) { - ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeAAAA, test.expectedAAAAService, test.expectedAAAAQuery, test.expectedAAAAQueryID, experimentsHCL) - if !ok { - t.Fatalf("Expected service AAAA lookup %s to pass: %v", test.name, err) - } - }) - - t.Run(fmt.Sprintf("ANY lookup %v", test), func(t *testing.T) { - ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeANY, test.expectedANYService, test.expectedANYQuery, test.expectedANYQueryID, experimentsHCL) - if !ok { - t.Fatalf("Expected service ANY lookup %s to pass: %v", test.name, err) - } - }) - } - }) - } -} - -func TestDNS_ServiceLookup_CNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{ - dnsCNAME("www.google.com", "google.com"), - dnsA("google.com", "1.2.3.4"), - }, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a name for an address. 
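// Because the node registered below uses a hostname rather than an IP address
// and the agent is configured with a recursor, the answer section carries the
// whole resolution chain: the service-name CNAME, any upstream CNAMEs, and
// finally the A record, which is what the assertions further down walk
// through. A sketch of following that chain to the terminal address; the agent
// address and package name are placeholders.
package dnssketch

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

// resolveThroughCNAMEs returns the first address found at the end of the CNAME
// chain in the answer section, or an error if no A record is present.
func resolveThroughCNAMEs(agentDNS, question string) (net.IP, error) {
	m := new(dns.Msg)
	m.SetQuestion(question, dns.TypeA)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, agentDNS)
	if err != nil {
		return nil, err
	}

	for _, rr := range in.Answer {
		if a, ok := rr.(*dns.A); ok {
			return a.A, nil
		}
		// *dns.CNAME entries are intermediate hops; keep scanning in order.
	}
	return nil, fmt.Errorf("no A record in answer for %s", question)
}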
- { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "www.google.com", - Service: &structs.NodeService{ - Service: "search", - Port: 80, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "search", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. - questions := []string{ - "search.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Service CNAME, google CNAME, google A record - if len(in.Answer) != 3 { - t.Fatalf("Bad: %#v", in) - } - - // Should have service CNAME - cnRec, ok := in.Answer[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if cnRec.Target != "www.google.com." { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - // Should have google CNAME - cnRec, ok = in.Answer[1].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[1]) - } - if cnRec.Target != "google.com." { - t.Fatalf("Bad: %#v", in.Answer[1]) - } - - // Check we recursively resolve - if _, ok := in.Answer[2].(*dns.A); !ok { - t.Fatalf("Bad: %#v", in.Answer[2]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_ServiceAddress_CNAME(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{ - dnsCNAME("www.google.com", "google.com"), - dnsA("google.com", "1.2.3.4"), - }, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a name for an address. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "google", - Address: "1.2.3.4", - Service: &structs.NodeService{ - Service: "search", - Port: 80, - Address: "www.google.com", - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "search", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Look up the service directly and via prepared query. 
- questions := []string{ - "search.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Service CNAME, google CNAME, google A record - if len(in.Answer) != 3 { - t.Fatalf("Bad: %#v", in) - } - - // Should have service CNAME - cnRec, ok := in.Answer[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if cnRec.Target != "www.google.com." { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - // Should have google CNAME - cnRec, ok = in.Answer[1].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[1]) - } - if cnRec.Target != "google.com." { - t.Fatalf("Bad: %#v", in.Answer[1]) - } - - // Check we recursively resolve - if _, ok := in.Answer[2].(*dns.A); !ok { - t.Fatalf("Bad: %#v", in.Answer[2]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_TTL(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - dns_config { - service_ttl = { - "d*" = "42s" - "db" = "10s" - "db*" = "66s" - "*" = "5s" - } - allow_stale = true - max_stale = "1s" - } - `+experimentsHCL) - defer a.Shutdown() - - for idx, service := range []string{"db", "dblb", "dk", "api"} { - nodeName := fmt.Sprintf("foo%d", idx) - address := fmt.Sprintf("127.0.0.%d", idx) - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: nodeName, - Address: address, - Service: &structs.NodeService{ - Service: service, - Tags: []string{"primary"}, - Port: 12345 + idx, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } - - c := new(dns.Client) - expectResult := func(dnsQuery string, expectedTTL uint32) { - t.Run(dnsQuery, func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(dnsQuery, dns.TypeSRV) - - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v, len is %d", in, len(in.Answer)) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Hdr.Ttl != expectedTTL { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != expectedTTL { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - }) - } - // Should have its exact TTL - expectResult("db.service.consul.", 10) - // Should match db* - expectResult("dblb.service.consul.", 66) - // Should match d* - expectResult("dk.service.consul.", 42) - // Should match * - expectResult("api.service.consul.", 5) - }) - } -} - -func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - questions := []string{ 
- "_db._primary.service.dc1.consul.", - "_db._primary.service.consul.", - "_db._primary.dc1.consul.", - "_db._primary.consul.", - } - - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - } - }) - } -} - -func TestDNS_ServiceLookup_SRV_RFC_TCP_Default(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - questions := []string{ - "_db._tcp.service.dc1.consul.", - "_db._tcp.service.consul.", - "_db._tcp.dc1.consul.", - "_db._tcp.consul.", - } - - for _, question := range questions { - t.Run(question, func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } - - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Target != "foo.node.dc1.consul." { - t.Fatalf("Bad: %#v", srvRec) - } - if srvRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Name != "foo.node.dc1.consul." 
{ - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - if aRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Extra[0]) - } - }) - } - }) - } - -} - -func initDNSToken(t *testing.T, rpc RPC) { - t.Helper() - - reqToken := structs.ACLTokenSetRequest{ - Datacenter: "dc1", - ACLToken: structs.ACLToken{ - SecretID: "279d4735-f8ca-4d48-b5cc-c00a9713bbf8", - Policies: nil, - TemplatedPolicies: []*structs.ACLTemplatedPolicy{{TemplateName: "builtin/dns"}}, - }, - WriteRequest: structs.WriteRequest{Token: "root"}, - } - err := rpc.RPC(context.Background(), "ACL.TokenSet", &reqToken, &structs.ACLToken{}) - require.NoError(t, err) -} - -func TestDNS_ServiceLookup_FilterACL(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - tests := []struct { - token string - results int - }{ - {"root", 1}, - {"anonymous", 0}, - {"dns", 1}, - } - for _, tt := range tests { - t.Run("ACLToken == "+tt.token, func(t *testing.T) { - hcl := ` - primary_datacenter = "dc1" - - acl { - enabled = true - default_policy = "deny" - down_policy = "deny" - - tokens { - initial_management = "root" -` - if tt.token == "dns" { - // Create a UUID for dns token since it doesn't have an alias - dnsToken := "279d4735-f8ca-4d48-b5cc-c00a9713bbf8" - - hcl = hcl + ` - default = "anonymous" - dns = "` + dnsToken + `" -` - } else { - hcl = hcl + ` - default = "` + tt.token + `" -` - } - - hcl = hcl + ` - } - } - ` - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, hcl+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - if tt.token == "dns" { - initDNSToken(t, a) - } - - // Register a service - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "foo", - Port: 12345, - }, - WriteRequest: structs.WriteRequest{Token: "root"}, - } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Set up the DNS query - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("foo.service.consul.", dns.TypeA) - - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(in.Answer) != tt.results { - t.Fatalf("Bad: %#v", in) - } - }) - } - }) - } -} - -func TestDNS_ServiceLookup_MetaTXT(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = true } `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "key": "value", - }, - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAdditional := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 
0x0, 0x1}, // 127.0.0.1 - }, - &dns.TXT{ - Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, - Txt: []string{"key=value"}, - }, - } - require.Equal(t, wantAdditional, in.Extra) - }) - } -} - -func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false } `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a node with a service. - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - NodeMeta: map[string]string{ - "key": "value", - }, - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } - - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - m := new(dns.Msg) - m.SetQuestion("db.service.consul.", dns.TypeSRV) - - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - - wantAdditional := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 - }, - } - require.Equal(t, wantAdditional, in.Extra) - }) - } -} diff --git a/agent/dns_test.go b/agent/dns_test.go index 35e80a856a07d..6db0036e5276a 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -1,17 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent -/* Note: this file got to be 10k lines long and caused multiple IDE issues - * as well as GitHub's UI unable to display diffs with large changes to this file. - * This file has been broken up by moving: - * - Node Lookup tests into dns_node_lookup_test.go - * - Service Lookup tests into dn_service_lookup_test.go - * - * Please be aware of the size of each of these files and add tests / break - * up tests accordingly. 
- */ import ( "context" "errors" @@ -20,20 +11,21 @@ import ( "math/rand" "net" "reflect" + "sort" "strings" "testing" "time" + "github.com/hashicorp/serf/coordinate" "github.com/miekg/dns" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/hashicorp/serf/coordinate" - - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" + agentdns "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" @@ -117,19 +109,8 @@ func dnsTXT(src string, txt []string) *dns.TXT { } } -func getVersionHCL(enableV2 bool) map[string]string { - versions := map[string]string{ - "DNS: v1 / Catalog: v1": "", - } - - if enableV2 { - versions["DNS: v2 / Catalog: v1"] = `experiments=["v2dns"]` - } - return versions -} - -// Copied to agent/dns/recursor_test.go -func TestDNS_RecursorAddr(t *testing.T) { +func TestRecursorAddr(t *testing.T) { + t.Parallel() addr, err := recursorAddr("8.8.8.8") if err != nil { t.Fatalf("err: %v", err) @@ -154,7 +135,7 @@ func TestDNS_RecursorAddr(t *testing.T) { } } -func TestDNS_EncodeKVasRFC1464(t *testing.T) { +func TestEncodeKVasRFC1464(t *testing.T) { // Test cases are from rfc1464 type rfc1464Test struct { key, value, internalForm, externalForm string @@ -188,38 +169,35 @@ func TestDNS_Over_TCP(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "Foo", - Address: "127.0.0.1", - } + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "Foo", + Address: "127.0.0.1", + } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - m := new(dns.Msg) - m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) + m := new(dns.Msg) + m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) - c := new(dns.Client) - c.Net = "tcp" - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + c.Net = "tcp" + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Answer) != 1 { - t.Fatalf("empty lookup: %#v", in) - } - }) + if len(in.Answer) != 1 { + t.Fatalf("empty lookup: %#v", in) } } @@ -228,28 +206,252 @@ func TestDNS_EmptyAltDomain(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("consul.service.", dns.TypeA) + m := new(dns.Msg) + m.SetQuestion("consul.service.", dns.TypeA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - 
require.Empty(t, in.Answer) - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Empty(t, in.Answer) +} + +func TestDNS_NodeLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + TaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + NodeMeta: map[string]string{ + "key": "value", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("foo.node.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, in.Answer, 2) + require.Len(t, in.Extra, 0) + + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok, "First answer is not an A record") + require.Equal(t, "127.0.0.1", aRec.A.String()) + require.Equal(t, uint32(0), aRec.Hdr.Ttl) + + txt, ok := in.Answer[1].(*dns.TXT) + require.True(t, ok, "Second answer is not a TXT record") + require.Len(t, txt.Txt, 1) + require.Equal(t, "key=value", txt.Txt[0]) + + // Re-do the query, but only for an A RR + + m = new(dns.Msg) + m.SetQuestion("foo.node.consul.", dns.TypeA) + + c = new(dns.Client) + in, _, err = c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, in.Answer, 1) + require.Len(t, in.Extra, 1) + + aRec, ok = in.Answer[0].(*dns.A) + require.True(t, ok, "Answer is not an A record") + require.Equal(t, "127.0.0.1", aRec.A.String()) + require.Equal(t, uint32(0), aRec.Hdr.Ttl) + + txt, ok = in.Extra[0].(*dns.TXT) + require.True(t, ok, "Extra record is not a TXT record") + require.Len(t, txt.Txt, 1) + require.Equal(t, "key=value", txt.Txt[0]) + + // Re-do the query, but specify the DC + m = new(dns.Msg) + m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) + + c = new(dns.Client) + in, _, err = c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, in.Answer, 2) + require.Len(t, in.Extra, 0) + + aRec, ok = in.Answer[0].(*dns.A) + require.True(t, ok, "First answer is not an A record") + require.Equal(t, "127.0.0.1", aRec.A.String()) + require.Equal(t, uint32(0), aRec.Hdr.Ttl) + + _, ok = in.Answer[1].(*dns.TXT) + require.True(t, ok, "Second answer is not a TXT record") + + // lookup a non-existing node, we should receive a SOA + m = new(dns.Msg) + m.SetQuestion("nofoo.node.dc1.consul.", dns.TypeANY) + + c = new(dns.Client) + in, _, err = c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, in.Ns, 1) + soaRec, ok := in.Ns[0].(*dns.SOA) + require.True(t, ok, "NS RR is not a SOA record") + require.Equal(t, uint32(0), soaRec.Hdr.Ttl) +} + +func TestDNS_CaseInsensitiveNodeLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "Foo", + Address: "127.0.0.1", + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("fOO.node.dc1.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + 
t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("empty lookup: %#v", in) + } +} + +func TestDNS_NodeLookup_PeriodName(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node with period in name + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo.bar", + Address: "127.0.0.1", + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("foo.bar.node.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + aRec, ok := in.Answer[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } +} + +func TestDNS_NodeLookup_AAAA(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "::4242:4242", + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeAAAA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + aRec, ok := in.Answer[0].(*dns.AAAA) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.AAAA.String() != "::4242:4242" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) } } -func TestDNS_CycleRecursorCheck(t *testing.T) { +func TestDNSCycleRecursorCheck(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } + t.Parallel() // Start a DNS recursor that returns a SERVFAIL server1 := makeRecursor(t, dns.Msg{ MsgHdr: dns.MsgHdr{Rcode: dns.RcodeServerFailure}, @@ -263,35 +465,31 @@ func TestDNS_CycleRecursorCheck(t *testing.T) { }, }) defer server2.Shutdown() - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - // Mock the agent startup with the necessary configs - agent := NewTestAgent(t, - `recursors = ["`+server1.Addr+`", "`+server2.Addr+`"] - `+experimentsHCL) - defer agent.Shutdown() - // DNS Message init - m := new(dns.Msg) - m.SetQuestion("google.com.", dns.TypeA) - // Agent request - client := new(dns.Client) - in, _, _ := client.Exchange(m, agent.DNSAddr()) - wantAnswer := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "www.google.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, - A: []byte{0xAC, 0x15, 0x2D, 0x43}, // 172 , 21, 45, 67 - }, - } - require.NotNil(t, in) - require.Equal(t, wantAnswer, in.Answer) - }) + // Mock the agent startup with the necessary configs + agent := NewTestAgent(t, + `recursors = ["`+server1.Addr+`", "`+server2.Addr+`"] + `) + defer agent.Shutdown() + // DNS Message init + m := new(dns.Msg) + m.SetQuestion("google.com.", dns.TypeA) + // Agent request + client := new(dns.Client) + in, _, _ := client.Exchange(m, agent.DNSAddr()) + wantAnswer := []dns.RR{ + &dns.A{ + Hdr: 
dns.RR_Header{Name: "www.google.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0xAC, 0x15, 0x2D, 0x43}, // 172 , 21, 45, 67 + }, } + require.Equal(t, wantAnswer, in.Answer) } -func TestDNS_CycleRecursorCheckAllFail(t *testing.T) { +func TestDNSCycleRecursorCheckAllFail(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } + t.Parallel() // Start 3 DNS recursors that returns a REFUSED status server1 := makeRecursor(t, dns.Msg{ MsgHdr: dns.MsgHdr{Rcode: dns.RcodeRefused}, @@ -305,1313 +503,5972 @@ func TestDNS_CycleRecursorCheckAllFail(t *testing.T) { MsgHdr: dns.MsgHdr{Rcode: dns.RcodeRefused}, }) defer server3.Shutdown() - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - // Mock the agent startup with the necessary configs - agent := NewTestAgent(t, - `recursors = ["`+server1.Addr+`", "`+server2.Addr+`","`+server3.Addr+`"] - `+experimentsHCL) - defer agent.Shutdown() - // DNS dummy message initialization - m := new(dns.Msg) - m.SetQuestion("google.com.", dns.TypeA) - // Agent request - client := new(dns.Client) - in, _, err := client.Exchange(m, agent.DNSAddr()) - require.NoError(t, err) - // Verify if we hit SERVFAIL from Consul - require.NotNil(t, in) - require.Equal(t, dns.RcodeServerFailure, in.Rcode) - }) - } + // Mock the agent startup with the necessary configs + agent := NewTestAgent(t, + `recursors = ["`+server1.Addr+`", "`+server2.Addr+`","`+server3.Addr+`"] + `) + defer agent.Shutdown() + // DNS dummy message initialization + m := new(dns.Msg) + m.SetQuestion("google.com.", dns.TypeA) + // Agent request + client := new(dns.Client) + in, _, err := client.Exchange(m, agent.DNSAddr()) + require.NoError(t, err) + // Verify if we hit SERVFAIL from Consul + require.Equal(t, dns.RcodeServerFailure, in.Rcode) } - -func TestDNS_EDNS0(t *testing.T) { +func TestDNS_NodeLookup_CNAME(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + recursor := makeRecursor(t, dns.Msg{ + Answer: []dns.RR{ + dnsCNAME("www.google.com", "google.com"), + dnsA("google.com", "1.2.3.4"), + dnsTXT("google.com", []string{"my_txt_value"}), + }, + }) + defer recursor.Shutdown() - // Register node - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.2", - } + a := NewTestAgent(t, ` + recursors = ["`+recursor.Addr+`"] + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "www.google.com", + } - m := new(dns.Msg) - m.SetEdns0(12345, true) - m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + m := new(dns.Msg) + m.SetQuestion("google.node.consul.", dns.TypeANY) + m.SetEdns0(8192, true) - if len(in.Answer) != 1 { - t.Fatalf("empty lookup: %#v", in) - } - edns := in.IsEdns0() - if edns == nil { - t.Fatalf("empty edns: %#v", in) - } - if edns.UDPSize() != 12345 { - 
t.Fatalf("bad edns size: %d", edns.UDPSize()) - } - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.CNAME{ + Hdr: dns.RR_Header{Name: "google.node.consul.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x10}, + Target: "www.google.com.", + }, + &dns.CNAME{ + Hdr: dns.RR_Header{Name: "www.google.com.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Rdlength: 0x2}, + Target: "google.com.", + }, + &dns.A{ + Hdr: dns.RR_Header{Name: "google.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x1, 0x2, 0x3, 0x4}, // 1.2.3.4 + }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "google.com.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xd}, + Txt: []string{"my_txt_value"}, + }, } + require.Equal(t, wantAnswer, in.Answer) } -func TestDNS_EDNS0_ECS(t *testing.T) { +func TestDNS_NodeLookup_TXT(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + a := NewTestAgent(t, ``) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "rfc1035-00": "value0", + "key0": "value1", + }, + } - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) - } + m := new(dns.Msg) + m.SetQuestion("google.node.consul.", dns.TypeTXT) - cases := []struct { - Name string - Question string - SubnetAddr string - SourceNetmask uint8 - ExpectedScope uint8 - }{ - {"global", "db.service.consul.", "198.18.0.1", 32, 0}, - {"query", "test.query.consul.", "198.18.0.1", 32, 32}, - {"query-subnet", "test.query.consul.", "198.18.0.0", 21, 21}, - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - c := new(dns.Client) - // Query the service directly - should have a globally valid scope (0) - m := new(dns.Msg) - edns := new(dns.OPT) - edns.Hdr.Name = "." 
- edns.Hdr.Rrtype = dns.TypeOPT - edns.SetUDPSize(12345) - edns.SetDo(true) - subnetOp := new(dns.EDNS0_SUBNET) - subnetOp.Code = dns.EDNS0SUBNET - subnetOp.Family = 1 - subnetOp.SourceNetmask = tc.SourceNetmask - subnetOp.Address = net.ParseIP(tc.SubnetAddr) - edns.Option = append(edns.Option, subnetOp) - m.Extra = append(m.Extra, edns) - m.SetQuestion(tc.Question, dns.TypeA) + // Should have the 1 TXT record reply + if len(in.Answer) != 2 { + t.Fatalf("Bad: %#v", in) + } - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, "127.0.0.1", aRec.A.String()) - - optRR := in.IsEdns0() - require.NotNil(t, optRR) - require.Len(t, optRR.Option, 1) - - subnet, ok := optRR.Option[0].(*dns.EDNS0_SUBNET) - require.True(t, ok) - require.Equal(t, uint16(1), subnet.Family) - require.Equal(t, tc.SourceNetmask, subnet.SourceNetmask) - require.Equal(t, tc.ExpectedScope, subnet.SourceScope) - require.Equal(t, net.ParseIP(tc.SubnetAddr), subnet.Address) - }) - } - }) + txtRec, ok := in.Answer[0].(*dns.TXT) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if len(txtRec.Txt) != 1 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" { + t.Fatalf("Bad: %#v", in.Answer[0]) } } -func TestDNS_SOA_Settings(t *testing.T) { +func TestDNS_NodeLookup_TXT_DontSuppress(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - testSoaWithConfig := func(config string, ttl, expire, refresh, retry uint) { - a := NewTestAgent(t, config) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false }`) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // lookup a non-existing node, we should receive a SOA - m := new(dns.Msg) - m.SetQuestion("nofoo.node.dc1.consul.", dns.TypeANY) + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "rfc1035-00": "value0", + "key0": "value1", + }, + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Ns, 1) - soaRec, ok := in.Ns[0].(*dns.SOA) - require.True(t, ok, "NS RR is not a SOA record") - require.Equal(t, uint32(ttl), soaRec.Minttl) - require.Equal(t, uint32(expire), soaRec.Expire) - require.Equal(t, uint32(refresh), soaRec.Refresh) - require.Equal(t, uint32(retry), soaRec.Retry) - require.Equal(t, uint32(ttl), soaRec.Hdr.Ttl) + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - // Default configuration - testSoaWithConfig(experimentsHCL, 0, 86400, 3600, 600) - // Override all settings - testSoaWithConfig("dns_config={soa={min_ttl=60,expire=43200,refresh=1800,retry=300}} "+experimentsHCL, 60, 43200, 1800, 300) - // Override partial settings - testSoaWithConfig("dns_config={soa={min_ttl=60,expire=43200}} "+experimentsHCL, 60, 43200, 3600, 600) - // Override partial settings, part II - testSoaWithConfig("dns_config={soa={refresh=1800,retry=300}} "+experimentsHCL, 0, 86400, 1800, 300) - }) + m := new(dns.Msg) + m.SetQuestion("google.node.consul.", dns.TypeTXT) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should 
have the 1 TXT record reply + if len(in.Answer) != 2 { + t.Fatalf("Bad: %#v", in) + } + + txtRec, ok := in.Answer[0].(*dns.TXT) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if len(txtRec.Txt) != 1 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" { + t.Fatalf("Bad: %#v", in.Answer[0]) } } -func TestDNS_VirtualIPLookup(t *testing.T) { +func TestDNS_NodeLookup_ANY(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := StartTestAgent(t, TestAgent{HCL: experimentsHCL, Overrides: `peering = { test_allow_peer_registrations = true } log_level = "debug"`}) - defer a.Shutdown() - - testrpc.WaitForLeader(t, a.RPC, "dc1") - - server, ok := a.delegate.(*consul.Server) - require.True(t, ok) - - // The proxy service will not receive a virtual IP if the server is not assigning virtual IPs yet. - retry.Run(t, func(r *retry.R) { - _, entry, err := server.FSM().State().SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) - require.NoError(r, err) - require.NotNil(r, entry) - }) - - type testCase struct { - name string - reg *structs.RegisterRequest - question string - expect string - } - - run := func(t *testing.T, tc testCase) { - var out struct{} - require.Nil(t, a.RPC(context.Background(), "Catalog.Register", tc.reg, &out)) + a := NewTestAgent(t, ``) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion(tc.question, dns.TypeA) + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - require.Nil(t, err) - require.Len(t, in.Answer, 1) + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, tc.expect, aRec.A.String()) - } + m := new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeANY) - tt := []testCase{ - { - name: "local query", - reg: &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.55", - Service: &structs.NodeService{ - Kind: structs.ServiceKindConnectProxy, - Service: "web-proxy", - Port: 12345, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "db", - }, - }, - }, - question: "db.virtual.consul.", - expect: "240.0.0.1", - }, - { - name: "query for imported service", - reg: &structs.RegisterRequest{ - PeerName: "frontend", - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.55", - Service: &structs.NodeService{ - PeerName: "frontend", - Kind: structs.ServiceKindConnectProxy, - Service: "web-proxy", - Port: 12345, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "db", - }, - }, - }, - question: "db.virtual.frontend.consul.", - expect: "240.0.0.2", - }, - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - run(t, tc) - }) - } - }) + wantAnswer := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, + Txt: 
[]string{"key=value"}, + }, } + require.Equal(t, wantAnswer, in.Answer) } -func TestDNS_InifiniteRecursion(t *testing.T) { +func TestDNS_NodeLookup_ANY_DontSuppressTXT(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - // This test should not create an infinite recursion - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "CONSUL." - node_name = "test node" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false }`) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Register the initial node with a service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "web", - Address: "web.service.consul.", - Service: &structs.NodeService{ - Service: "web", - Port: 12345, - Address: "web.service.consul.", - }, - } + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - // Look up the service directly - questions := []string{ - "web.service.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + m := new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeANY) - if len(in.Answer) < 1 { - t.Fatalf("Bad: %#v", in) - } - aRec, ok := in.Answer[0].(*dns.CNAME) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aRec.Target != "web.service.consul." { - t.Fatalf("Bad: %#v, target:=%s", aRec, aRec.Target) - } - } - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, + Txt: []string{"key=value"}, + }, } + require.Equal(t, wantAnswer, in.Answer) } -func TestDNS_NSRecords(t *testing.T) { +func TestDNS_NodeLookup_A_SuppressTXT(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "CONSUL." 
- node_name = "server1" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") + a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false }`) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("something.node.consul.", dns.TypeNS) + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) - wantAnswer := []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{Name: "consul.", Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x13}, - Ns: "server1.node.dc1.consul.", - }, - } - require.Equal(t, wantAnswer, in.Answer, "answer") - wantExtra := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: "server1.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4, Ttl: 0}, - A: net.ParseIP("127.0.0.1").To4(), - }, - } + m := new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeA) - require.Equal(t, wantExtra, in.Extra, "extra") - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + wantAnswer := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, } + require.Equal(t, wantAnswer, in.Answer) + + // ensure TXT RR suppression + require.Len(t, in.Extra, 0) } -func TestDNS_AltDomain_NSRecords(t *testing.T) { +func TestDNS_EDNS0(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - a := NewTestAgent(t, ` - domain = "CONSUL." - node_name = "server1" - alt_domain = "test-domain." 
- `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - - questions := []struct { - ask string - domain string - wantDomain string - }{ - {"something.node.consul.", "consul.", "server1.node.dc1.consul."}, - {"something.node.test-domain.", "test-domain.", "server1.node.dc1.test-domain."}, - } + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.2", + } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeNS) + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + m := new(dns.Msg) + m.SetEdns0(12345, true) + m.SetQuestion("foo.node.dc1.consul.", dns.TypeANY) - wantAnswer := []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{Name: question.domain, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x13}, - Ns: question.wantDomain, - }, - } - require.Equal(t, wantAnswer, in.Answer, "answer") - wantExtra := []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{Name: question.wantDomain, Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4, Ttl: 0}, - A: net.ParseIP("127.0.0.1").To4(), - }, - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - require.Equal(t, wantExtra, in.Extra, "extra") - } - }) + if len(in.Answer) != 1 { + t.Fatalf("empty lookup: %#v", in) + } + edns := in.IsEdns0() + if edns == nil { + t.Fatalf("empty edns: %#v", in) + } + if edns.UDPSize() != 12345 { + t.Fatalf("bad edns size: %d", edns.UDPSize()) } } -func TestDNS_NSRecords_IPV6(t *testing.T) { +func TestDNS_EDNS0_ECS(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "CONSUL." - node_name = "server1" - advertise_addr = "::1" - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("server1.node.dc1.consul.", dns.TypeNS) + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register an equivalent prepared query. 
+ var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) + } + + cases := []struct { + Name string + Question string + SubnetAddr string + SourceNetmask uint8 + ExpectedScope uint8 + }{ + {"global", "db.service.consul.", "198.18.0.1", 32, 0}, + {"query", "test.query.consul.", "198.18.0.1", 32, 32}, + {"query-subnet", "test.query.consul.", "198.18.0.0", 21, 21}, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { c := new(dns.Client) + // Query the service directly - should have a globally valid scope (0) + m := new(dns.Msg) + edns := new(dns.OPT) + edns.Hdr.Name = "." + edns.Hdr.Rrtype = dns.TypeOPT + edns.SetUDPSize(12345) + edns.SetDo(true) + subnetOp := new(dns.EDNS0_SUBNET) + subnetOp.Code = dns.EDNS0SUBNET + subnetOp.Family = 1 + subnetOp.SourceNetmask = tc.SourceNetmask + subnetOp.Address = net.ParseIP(tc.SubnetAddr) + edns.Option = append(edns.Option, subnetOp) + m.Extra = append(m.Extra, edns) + m.SetQuestion(tc.Question, dns.TypeA) + in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, err) + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, "127.0.0.1", aRec.A.String()) - wantAnswer := []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{Name: "consul.", Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x2}, - Ns: "server1.node.dc1.consul.", - }, - } - require.Equal(t, wantAnswer, in.Answer, "answer") - wantExtra := []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{Name: "server1.node.dc1.consul.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Rdlength: 0x10, Ttl: 0}, - AAAA: net.ParseIP("::1"), - }, - } + optRR := in.IsEdns0() + require.NotNil(t, optRR) + require.Len(t, optRR.Option, 1) - require.Equal(t, wantExtra, in.Extra, "extra") + subnet, ok := optRR.Option[0].(*dns.EDNS0_SUBNET) + require.True(t, ok) + require.Equal(t, uint16(1), subnet.Family) + require.Equal(t, tc.SourceNetmask, subnet.SourceNetmask) + require.Equal(t, tc.ExpectedScope, subnet.SourceScope) + require.Equal(t, net.ParseIP(tc.SubnetAddr), subnet.Address) }) } } -func TestDNS_AltDomain_NSRecords_IPV6(t *testing.T) { +func TestDNS_ReverseLookup(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - domain = "CONSUL." - node_name = "server1" - advertise_addr = "::1" - alt_domain = "test-domain." 
- `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") - - questions := []struct { - ask string - domain string - wantDomain string - }{ - {"server1.node.dc1.consul.", "consul.", "server1.node.dc1.consul."}, - {"server1.node.dc1.test-domain.", "test-domain.", "server1.node.dc1.test-domain."}, - } + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeNS) + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo2", + Address: "127.0.0.2", + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - wantAnswer := []dns.RR{ - &dns.NS{ - Hdr: dns.RR_Header{Name: question.domain, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x2}, - Ns: question.wantDomain, - }, - } - require.Equal(t, wantAnswer, in.Answer, "answer") - wantExtra := []dns.RR{ - &dns.AAAA{ - Hdr: dns.RR_Header{Name: question.wantDomain, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Rdlength: 0x10, Ttl: 0}, - AAAA: net.ParseIP("::1"), - }, - } + m := new(dns.Msg) + m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) - require.Equal(t, wantExtra, in.Extra, "extra") - } - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != "foo2.node.dc1.consul." { + t.Fatalf("Bad: %#v", ptrRec) } } -func TestDNS_Lookup_TaggedIPAddresses(t *testing.T) { +func TestDNS_ReverseLookup_CustomDomain(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, ` + domain = "custom" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo2", + Address: "127.0.0.2", + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != "foo2.node.dc1.custom." 
{ + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_ReverseLookup_IPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "::4242:4242", + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("2.4.2.4.2.4.2.4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != "bar.node.dc1.consul." { + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_ServiceReverseLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + Address: "127.0.0.2", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "consul", nil)+"." { + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_ServiceReverseLookup_IPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "2001:db8::1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + Address: "2001:db8::ff00:42:8329", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("9.2.3.8.2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "consul", nil)+"." 
{ + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_ServiceReverseLookup_CustomDomain(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "custom" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + Address: "127.0.0.2", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != serviceCanonicalDNSName("db", "service", "dc1", "custom", nil)+"." { + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_SOA_Settings(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + testSoaWithConfig := func(config string, ttl, expire, refresh, retry uint) { + a := NewTestAgent(t, config) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // lookup a non-existing node, we should receive a SOA + m := new(dns.Msg) + m.SetQuestion("nofoo.node.dc1.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Len(t, in.Ns, 1) + soaRec, ok := in.Ns[0].(*dns.SOA) + require.True(t, ok, "NS RR is not a SOA record") + require.Equal(t, uint32(ttl), soaRec.Minttl) + require.Equal(t, uint32(expire), soaRec.Expire) + require.Equal(t, uint32(refresh), soaRec.Refresh) + require.Equal(t, uint32(retry), soaRec.Retry) + require.Equal(t, uint32(ttl), soaRec.Hdr.Ttl) + } + // Default configuration + testSoaWithConfig("", 0, 86400, 3600, 600) + // Override all settings + testSoaWithConfig("dns_config={soa={min_ttl=60,expire=43200,refresh=1800,retry=300}}", 60, 43200, 1800, 300) + // Override partial settings + testSoaWithConfig("dns_config={soa={min_ttl=60,expire=43200}}", 60, 43200, 3600, 600) + // Override partial settings, part II + testSoaWithConfig("dns_config={soa={refresh=1800,retry=300}}", 0, 86400, 1800, 300) +} + +func TestDNS_ServiceReverseLookupNodeAddress(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. 
+ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + Address: "127.0.0.1", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("1.0.0.127.in-addr.arpa.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + ptrRec, ok := in.Answer[0].(*dns.PTR) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if ptrRec.Ptr != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", ptrRec) + } +} + +func TestDNS_ServiceLookupNoMultiCNAME(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "198.18.0.1", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "foo.node.consul", + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register a second node node with the same service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "198.18.0.2", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "bar.node.consul", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + // expect a CNAME and an A RR + require.Len(t, in.Answer, 2) + require.IsType(t, &dns.CNAME{}, in.Answer[0]) + require.IsType(t, &dns.A{}, in.Answer[1]) +} + +func TestDNS_ServiceLookupPreferNoCNAME(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "198.18.0.1", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "198.18.0.1", + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register a second node node with the same service. 
+ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "198.18.0.2", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "bar.node.consul", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + // expect a CNAME and an A RR + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.A) + require.Truef(t, ok, "Not an A RR") + + require.Equal(t, "db.service.consul.", aRec.Hdr.Name) + require.Equal(t, "198.18.0.1", aRec.A.String()) +} + +func TestDNS_ServiceLookupMultiAddrNoCNAME(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "198.18.0.1", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "198.18.0.1", + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register a second node node with the same service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "198.18.0.2", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "bar.node.consul", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register a second node node with the same service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "baz", + Address: "198.18.0.3", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + Address: "198.18.0.3", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + // expect a CNAME and an A RR + require.Len(t, in.Answer, 2) + require.IsType(t, &dns.A{}, in.Answer[0]) + require.IsType(t, &dns.A{}, in.Answer[1]) +} + +func TestDNS_ServiceLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. 
+ questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + } + + // Lookup a non-existing service/query, we should receive an SOA. + questions = []string{ + "nodb.service.consul.", + "nope.query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Ns) != 1 { + t.Fatalf("Bad: %#v", in) + } + + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + + } +} + +func TestDNS_ServiceLookupWithInternalServiceAddress(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + node_name = "my.test-node" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + // The service is using the consul DNS name as service address + // which triggers a lookup loop and a subsequent stack overflow + // crash. 
+ args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Address: "db.service.consul", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Looking up the service should not trigger a loop + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.SRV{ + Hdr: dns.RR_Header{Name: "db.service.consul.", Rrtype: 0x21, Class: 0x1, Rdlength: 0x1b}, + Priority: 0x1, + Weight: 0x1, + Port: 12345, + Target: "foo.node.dc1.consul.", + }, + } + require.Equal(t, wantAnswer, in.Answer, "answer") + wantExtra := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "foo.node.dc1.consul.", Rrtype: 0x1, Class: 0x1, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + } + require.Equal(t, wantExtra, in.Extra, "extra") +} + +func TestDNS_ConnectServiceLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register + { + args := structs.TestRegisterRequestProxy(t) + args.Address = "127.0.0.55" + args.Service.Proxy.DestinationServiceName = "db" + args.Service.Address = "" + args.Service.Port = 12345 + var out struct{} + require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Look up the service + questions := []string{ + "db.connect.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.Nil(t, err) + require.Len(t, in.Answer, 1) + + srvRec, ok := in.Answer[0].(*dns.SRV) + require.True(t, ok) + require.Equal(t, uint16(12345), srvRec.Port) + require.Equal(t, "foo.node.dc1.consul.", srvRec.Target) + require.Equal(t, uint32(0), srvRec.Hdr.Ttl) + + cnameRec, ok := in.Extra[0].(*dns.A) + require.True(t, ok) + require.Equal(t, "foo.node.dc1.consul.", cnameRec.Hdr.Name) + require.Equal(t, uint32(0), srvRec.Hdr.Ttl) + require.Equal(t, "127.0.0.55", cnameRec.A.String()) + } +} + +func TestDNS_VirtualIPLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true }`}) + defer a.Shutdown() + + testrpc.WaitForLeader(t, a.RPC, "dc1") + + server, ok := a.delegate.(*consul.Server) + require.True(t, ok) + + // The proxy service will not receive a virtual IP if the server is not assigning virtual IPs yet. 
+ retry.Run(t, func(r *retry.R) { + _, entry, err := server.FSM().State().SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled) + require.NoError(r, err) + require.NotNil(r, entry) + }) + + type testCase struct { + name string + reg *structs.RegisterRequest + question string + expect string + } + + run := func(t *testing.T, tc testCase) { + var out struct{} + require.Nil(t, a.RPC(context.Background(), "Catalog.Register", tc.reg, &out)) + + m := new(dns.Msg) + m.SetQuestion(tc.question, dns.TypeA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.Nil(t, err) + require.Len(t, in.Answer, 1) + + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, tc.expect, aRec.A.String()) + } + + tt := []testCase{ + { + name: "local query", + reg: &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.55", + Service: &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + Service: "web-proxy", + Port: 12345, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "db", + }, + }, + }, + question: "db.virtual.consul.", + expect: "240.0.0.1", + }, + { + name: "query for imported service", + reg: &structs.RegisterRequest{ + PeerName: "frontend", + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.55", + Service: &structs.NodeService{ + PeerName: "frontend", + Kind: structs.ServiceKindConnectProxy, + Service: "web-proxy", + Port: 12345, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "db", + }, + }, + }, + question: "db.virtual.frontend.consul.", + expect: "240.0.0.2", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestDNS_IngressServiceLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register ingress-gateway service + { + args := structs.TestRegisterIngressGateway(t) + var out struct{} + require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register db service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Address: "", + Port: 80, + }, + } + + var out struct{} + require.Nil(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + + // Register proxy-defaults with 'http' protocol + { + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: &structs.ProxyConfigEntry{ + Kind: structs.ProxyDefaults, + Name: structs.ProxyConfigGlobal, + Config: map[string]interface{}{ + "protocol": "http", + }, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out bool + require.Nil(t, a.RPC(context.Background(), "ConfigEntry.Apply", req, &out)) + require.True(t, out) + } + + // Register ingress-gateway config entry + { + args := &structs.IngressGatewayConfigEntry{ + Name: "ingress-gateway", + Kind: structs.IngressGateway, + Listeners: []structs.IngressListener{ + { + Port: 8888, + Protocol: "http", + Services: []structs.IngressService{ + {Name: "db"}, + {Name: "api"}, + }, + }, + }, + } + + req := structs.ConfigEntryRequest{ + Op: structs.ConfigEntryUpsert, + Datacenter: "dc1", + Entry: args, + } + var out bool + require.Nil(t, a.RPC(context.Background(), "ConfigEntry.Apply", req, &out)) + require.True(t, out) + } + + // Look up the service + questions := []string{ + "api.ingress.consul.", + 
"api.ingress.dc1.consul.", + "db.ingress.consul.", + "db.ingress.dc1.consul.", + } + for _, question := range questions { + t.Run(question, func(t *testing.T) { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.Nil(t, err) + require.Len(t, in.Answer, 1) + + cnameRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, question, cnameRec.Hdr.Name) + require.Equal(t, uint32(0), cnameRec.Hdr.Ttl) + require.Equal(t, "127.0.0.1", cnameRec.A.String()) + }) + } +} + +func TestDNS_ExternalServiceLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with an external service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "www.google.com", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service + questions := []string{ + "db.service.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "www.google.com." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + } +} + +func TestDNS_InifiniteRecursion(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // This test should not create an infinite recursion + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." + node_name = "test node" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register the initial node with a service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "web", + Address: "web.service.consul.", + Service: &structs.NodeService{ + Service: "web", + Port: 12345, + Address: "web.service.consul.", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly + questions := []string{ + "web.service.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) < 1 { + t.Fatalf("Bad: %#v", in) + } + aRec, ok := in.Answer[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.Target != "web.service.consul." { + t.Fatalf("Bad: %#v, target:=%s", aRec, aRec.Target) + } + } +} + +func TestDNS_ExternalServiceToConsulCNAMELookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." 
+ node_name = "test node" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register the initial node with a service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "web", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "web", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an external service pointing to the 'web' service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "alias", + Address: "web.service.consul", + Service: &structs.NodeService{ + Service: "alias", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly + questions := []string{ + "alias.service.consul.", + "alias.service.CoNsUl.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "web.service.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + if len(in.Extra) != 1 { + t.Fatalf("Bad: %#v", in) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "web.service.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + + } +} + +func TestDNS_NSRecords(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." + node_name = "server1" + `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + m := new(dns.Msg) + m.SetQuestion("something.node.consul.", dns.TypeNS) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.NS{ + Hdr: dns.RR_Header{Name: "consul.", Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x13}, + Ns: "server1.node.dc1.consul.", + }, + } + require.Equal(t, wantAnswer, in.Answer, "answer") + wantExtra := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "server1.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4, Ttl: 0}, + A: net.ParseIP("127.0.0.1").To4(), + }, + } + + require.Equal(t, wantExtra, in.Extra, "extra") +} + +func TestDNS_AltDomain_NSRecords(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." + node_name = "server1" + alt_domain = "test-domain." 
+ `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + questions := []struct { + ask string + domain string + wantDomain string + }{ + {"something.node.consul.", "consul.", "server1.node.dc1.consul."}, + {"something.node.test-domain.", "test-domain.", "server1.node.dc1.test-domain."}, + } + + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeNS) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.NS{ + Hdr: dns.RR_Header{Name: question.domain, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x13}, + Ns: question.wantDomain, + }, + } + require.Equal(t, wantAnswer, in.Answer, "answer") + wantExtra := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: question.wantDomain, Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4, Ttl: 0}, + A: net.ParseIP("127.0.0.1").To4(), + }, + } + + require.Equal(t, wantExtra, in.Extra, "extra") + } + +} + +func TestDNS_NSRecords_IPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." + node_name = "server1" + advertise_addr = "::1" + `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + m := new(dns.Msg) + m.SetQuestion("server1.node.dc1.consul.", dns.TypeNS) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.NS{ + Hdr: dns.RR_Header{Name: "consul.", Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x2}, + Ns: "server1.node.dc1.consul.", + }, + } + require.Equal(t, wantAnswer, in.Answer, "answer") + wantExtra := []dns.RR{ + &dns.AAAA{ + Hdr: dns.RR_Header{Name: "server1.node.dc1.consul.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Rdlength: 0x10, Ttl: 0}, + AAAA: net.ParseIP("::1"), + }, + } + + require.Equal(t, wantExtra, in.Extra, "extra") + +} + +func TestDNS_AltDomain_NSRecords_IPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + domain = "CONSUL." + node_name = "server1" + advertise_addr = "::1" + alt_domain = "test-domain." 
+ `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + questions := []struct { + ask string + domain string + wantDomain string + }{ + {"server1.node.dc1.consul.", "consul.", "server1.node.dc1.consul."}, + {"server1.node.dc1.test-domain.", "test-domain.", "server1.node.dc1.test-domain."}, + } + + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeNS) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.NS{ + Hdr: dns.RR_Header{Name: question.domain, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 0, Rdlength: 0x2}, + Ns: question.wantDomain, + }, + } + require.Equal(t, wantAnswer, in.Answer, "answer") + wantExtra := []dns.RR{ + &dns.AAAA{ + Hdr: dns.RR_Header{Name: question.wantDomain, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Rdlength: 0x10, Ttl: 0}, + AAAA: net.ParseIP("::1"), + }, + } + + require.Equal(t, wantExtra, in.Extra, "extra") + } + +} + +func TestDNS_ExternalServiceToConsulCNAMENestedLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + node_name = "test-node" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register the initial node with a service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "web", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "web", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an external service pointing to the 'web' service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "alias", + Address: "web.service.consul", + Service: &structs.NodeService{ + Service: "alias", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an external service pointing to the 'alias' service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "alias2", + Address: "alias.service.consul", + Service: &structs.NodeService{ + Service: "alias2", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly + questions := []string{ + "alias2.service.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "alias.service.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if len(in.Extra) != 2 { + t.Fatalf("Bad: %#v", in) + } + + cnameRec, ok := in.Extra[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if cnameRec.Hdr.Name != "alias.service.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if cnameRec.Target != "web.service.consul." 
{ + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if cnameRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + + aRec, ok := in.Extra[1].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + if aRec.Hdr.Name != "web.service.consul." { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + } +} + +func TestDNS_ServiceLookup_ServiceAddress_A(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Address: "127.0.0.2", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "7f000002.addr.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "7f000002.addr.dc1.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.2" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + } +} + +func TestDNS_AltDomain_ServiceLookup_ServiceAddress_A(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + alt_domain = "test-domain" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Address: "127.0.0.2", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. 
+ var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []struct { + ask string + wantDomain string + }{ + {"db.service.consul.", "consul."}, + {id + ".query.consul.", "consul."}, + {"db.service.test-domain.", "test-domain."}, + {id + ".query.test-domain.", "test-domain."}, + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "7f000002.addr.dc1."+question.wantDomain { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "7f000002.addr.dc1."+question.wantDomain { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.2" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + } +} + +func TestDNS_ServiceLookup_ServiceAddress_SRV(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + recursor := makeRecursor(t, dns.Msg{ + Answer: []dns.RR{ + dnsCNAME("www.google.com", "google.com"), + dnsA("google.com", "1.2.3.4"), + }, + }) + defer recursor.Shutdown() + + a := NewTestAgent(t, ` + recursors = ["`+recursor.Addr+`"] + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service whose address isn't an IP. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Address: "www.google.com", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + // Specify prepared query name containing "." to test + // since that is technically supported (though atypical). + var id string + preparedQueryName := "query.name.with.dots" + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: preparedQueryName, + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. 
+ questions := []string{ + "db.service.consul.", + id + ".query.consul.", + preparedQueryName + ".query.consul.", + fmt.Sprintf("_%s._tcp.query.consul.", id), + fmt.Sprintf("_%s._tcp.query.consul.", preparedQueryName), + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "www.google.com." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + // Should have google CNAME + cnRec, ok := in.Extra[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if cnRec.Target != "google.com." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + + // Check we recursively resolve + aRec, ok := in.Extra[1].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + if aRec.A.String() != "1.2.3.4" { + t.Fatalf("Bad: %s", aRec.A.String()) + } + } +} + +func TestDNS_ServiceLookup_ServiceAddressIPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Address: "2607:20:4005:808::200e", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "2607002040050808000000000000200e.addr.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.AAAA) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "2607002040050808000000000000200e.addr.dc1.consul." 
{ + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.AAAA.String() != "2607:20:4005:808::200e" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + } +} + +func TestDNS_AltDomain_ServiceLookup_ServiceAddressIPV6(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + alt_domain = "test-domain" + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Address: "2607:20:4005:808::200e", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []struct { + ask string + want string + }{ + {"db.service.consul.", "2607002040050808000000000000200e.addr.dc1.consul."}, + {"db.service.test-domain.", "2607002040050808000000000000200e.addr.dc1.test-domain."}, + {id + ".query.consul.", "2607002040050808000000000000200e.addr.dc1.consul."}, + {id + ".query.test-domain.", "2607002040050808000000000000200e.addr.dc1.test-domain."}, + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != question.want { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.AAAA) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != question.want { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.AAAA.String() != "2607:20:4005:808::200e" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + } +} + +func TestDNS_ServiceLookup_WanTranslation(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a1 := NewTestAgent(t, ` + datacenter = "dc1" + translate_wan_addrs = true + acl_datacenter = "" + `) + defer a1.Shutdown() + + a2 := NewTestAgent(t, ` + datacenter = "dc2" + translate_wan_addrs = true + acl_datacenter = "" + `) + defer a2.Shutdown() + + // Join WAN cluster + addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN) + _, err := a2.JoinWAN([]string{addr}) + require.NoError(t, err) + retry.Run(t, func(r *retry.R) { + require.Len(r, a1.WANMembers(), 2) + require.Len(r, a2.WANMembers(), 2) + }) + + // Register an equivalent prepared query. 
+ var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc2", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + require.NoError(t, a2.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) + } + + type testCase struct { + nodeTaggedAddresses map[string]string + serviceAddress string + serviceTaggedAddresses map[string]structs.ServiceAddress + + dnsAddr string + + expectedPort uint16 + expectedAddress string + expectedARRName string + } + + cases := map[string]testCase{ + "node-addr-from-dc1": { + dnsAddr: a1.config.DNSAddrs[0].String(), + expectedPort: 8080, + expectedAddress: "127.0.0.1", + expectedARRName: "foo.node.dc2.consul.", + }, + "node-wan-from-dc1": { + dnsAddr: a1.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + expectedPort: 8080, + expectedAddress: "127.0.0.2", + expectedARRName: "7f000002.addr.dc2.consul.", + }, + "service-addr-from-dc1": { + dnsAddr: a1.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + serviceAddress: "10.0.1.1", + expectedPort: 8080, + expectedAddress: "10.0.1.1", + expectedARRName: "0a000101.addr.dc2.consul.", + }, + "service-wan-from-dc1": { + dnsAddr: a1.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + serviceAddress: "10.0.1.1", + serviceTaggedAddresses: map[string]structs.ServiceAddress{ + "wan": { + Address: "198.18.0.1", + Port: 80, + }, + }, + expectedPort: 80, + expectedAddress: "198.18.0.1", + expectedARRName: "c6120001.addr.dc2.consul.", + }, + "node-addr-from-dc2": { + dnsAddr: a2.config.DNSAddrs[0].String(), + expectedPort: 8080, + expectedAddress: "127.0.0.1", + expectedARRName: "foo.node.dc2.consul.", + }, + "node-wan-from-dc2": { + dnsAddr: a2.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + expectedPort: 8080, + expectedAddress: "127.0.0.1", + expectedARRName: "foo.node.dc2.consul.", + }, + "service-addr-from-dc2": { + dnsAddr: a2.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + serviceAddress: "10.0.1.1", + expectedPort: 8080, + expectedAddress: "10.0.1.1", + expectedARRName: "0a000101.addr.dc2.consul.", + }, + "service-wan-from-dc2": { + dnsAddr: a2.config.DNSAddrs[0].String(), + nodeTaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + serviceAddress: "10.0.1.1", + serviceTaggedAddresses: map[string]structs.ServiceAddress{ + "wan": { + Address: "198.18.0.1", + Port: 80, + }, + }, + expectedPort: 8080, + expectedAddress: "10.0.1.1", + expectedARRName: "0a000101.addr.dc2.consul.", + }, + } + + for name, tc := range cases { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + // Register a remote node with a service. This is in a retry since we + // need the datacenter to have a route which takes a little more time + // beyond the join, and we don't have direct access to the router here. 
+ retry.Run(t, func(r *retry.R) { + args := &structs.RegisterRequest{ + Datacenter: "dc2", + Node: "foo", + Address: "127.0.0.1", + TaggedAddresses: tc.nodeTaggedAddresses, + Service: &structs.NodeService{ + Service: "db", + Address: tc.serviceAddress, + Port: 8080, + TaggedAddresses: tc.serviceTaggedAddresses, + }, + } + + var out struct{} + require.NoError(r, a2.RPC(context.Background(), "Catalog.Register", args, &out)) + }) + + // Look up the SRV record via service and prepared query. + questions := []string{ + "db.service.dc2.consul.", + id + ".query.dc2.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + + addr := tc.dnsAddr + in, _, err := c.Exchange(m, addr) + require.NoError(t, err) + require.Len(t, in.Answer, 1) + srvRec, ok := in.Answer[0].(*dns.SRV) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, tc.expectedPort, srvRec.Port) + + aRec, ok := in.Extra[0].(*dns.A) + require.True(t, ok, "Bad: %#v", in.Extra[0]) + require.Equal(t, tc.expectedARRName, aRec.Hdr.Name) + require.Equal(t, tc.expectedAddress, aRec.A.String()) + } + + // Also check the A record directly + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + + c := new(dns.Client) + addr := tc.dnsAddr + in, _, err := c.Exchange(m, addr) + require.NoError(t, err) + require.Len(t, in.Answer, 1) + + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, question, aRec.Hdr.Name) + require.Equal(t, tc.expectedAddress, aRec.A.String()) + } + }) + } +} + +func TestDNS_Lookup_TaggedIPAddresses(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register an equivalent prepared query. 
+ var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) + } + + type testCase struct { + nodeAddress string + nodeTaggedAddresses map[string]string + serviceAddress string + serviceTaggedAddresses map[string]structs.ServiceAddress + + expectedServiceIPv4Address string + expectedServiceIPv6Address string + expectedNodeIPv4Address string + expectedNodeIPv6Address string + } + + cases := map[string]testCase{ + "simple-ipv4": { + serviceAddress: "127.0.0.2", + nodeAddress: "127.0.0.1", + + expectedServiceIPv4Address: "127.0.0.2", + expectedServiceIPv6Address: "", + expectedNodeIPv4Address: "127.0.0.1", + expectedNodeIPv6Address: "", + }, + "simple-ipv6": { + serviceAddress: "::2", + nodeAddress: "::1", + + expectedServiceIPv6Address: "::2", + expectedServiceIPv4Address: "", + expectedNodeIPv6Address: "::1", + expectedNodeIPv4Address: "", + }, + "ipv4-with-tagged-ipv6": { + serviceAddress: "127.0.0.2", + nodeAddress: "127.0.0.1", + + serviceTaggedAddresses: map[string]structs.ServiceAddress{ + structs.TaggedAddressLANIPv6: {Address: "::2"}, + }, + nodeTaggedAddresses: map[string]string{ + structs.TaggedAddressLANIPv6: "::1", + }, + + expectedServiceIPv4Address: "127.0.0.2", + expectedServiceIPv6Address: "::2", + expectedNodeIPv4Address: "127.0.0.1", + expectedNodeIPv6Address: "::1", + }, + "ipv6-with-tagged-ipv4": { + serviceAddress: "::2", + nodeAddress: "::1", + + serviceTaggedAddresses: map[string]structs.ServiceAddress{ + structs.TaggedAddressLANIPv4: {Address: "127.0.0.2"}, + }, + nodeTaggedAddresses: map[string]string{ + structs.TaggedAddressLANIPv4: "127.0.0.1", + }, + + expectedServiceIPv4Address: "127.0.0.2", + expectedServiceIPv6Address: "::2", + expectedNodeIPv4Address: "127.0.0.1", + expectedNodeIPv6Address: "::1", + }, + } + + for name, tc := range cases { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: tc.nodeAddress, + TaggedAddresses: tc.nodeTaggedAddresses, + Service: &structs.NodeService{ + Service: "db", + Address: tc.serviceAddress, + Port: 8080, + TaggedAddresses: tc.serviceTaggedAddresses, + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + + // Look up the SRV record via service and prepared query. 
+ questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + + c := new(dns.Client) + addr := a.config.DNSAddrs[0].String() + in, _, err := c.Exchange(m, addr) + require.NoError(t, err) + + if tc.expectedServiceIPv4Address != "" { + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, question, aRec.Hdr.Name) + require.Equal(t, tc.expectedServiceIPv4Address, aRec.A.String()) + } else { + require.Len(t, in.Answer, 0) + } + + m = new(dns.Msg) + m.SetQuestion(question, dns.TypeAAAA) + + c = new(dns.Client) + addr = a.config.DNSAddrs[0].String() + in, _, err = c.Exchange(m, addr) + require.NoError(t, err) + + if tc.expectedServiceIPv6Address != "" { + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.AAAA) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, question, aRec.Hdr.Name) + require.Equal(t, tc.expectedServiceIPv6Address, aRec.AAAA.String()) + } else { + require.Len(t, in.Answer, 0) + } + } + + // Look up node + m := new(dns.Msg) + m.SetQuestion("foo.node.consul.", dns.TypeA) + + c := new(dns.Client) + addr := a.config.DNSAddrs[0].String() + in, _, err := c.Exchange(m, addr) + require.NoError(t, err) + + if tc.expectedNodeIPv4Address != "" { + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, "foo.node.consul.", aRec.Hdr.Name) + require.Equal(t, tc.expectedNodeIPv4Address, aRec.A.String()) + } else { + require.Len(t, in.Answer, 0) + } + + m = new(dns.Msg) + m.SetQuestion("foo.node.consul.", dns.TypeAAAA) + + c = new(dns.Client) + addr = a.config.DNSAddrs[0].String() + in, _, err = c.Exchange(m, addr) + require.NoError(t, err) + + if tc.expectedNodeIPv6Address != "" { + require.Len(t, in.Answer, 1) + aRec, ok := in.Answer[0].(*dns.AAAA) + require.True(t, ok, "Bad: %#v", in.Answer[0]) + require.Equal(t, "foo.node.consul.", aRec.Hdr.Name) + require.Equal(t, tc.expectedNodeIPv6Address, aRec.AAAA.String()) + } else { + require.Len(t, in.Answer, 0) + } + }) + } +} + +func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + tests := []struct { + name string + config string + }{ + // UDP + EDNS + {"normal", ""}, + {"cache", `dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"}`}, + {"cache-with-streaming", ` + rpc{ + enable_streaming=true + } + use_streaming_backend=true + dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"} + `}, + } + for _, tst := range tests { + t.Run(fmt.Sprintf("A lookup %v", tst.name), func(t *testing.T) { + a := NewTestAgent(t, tst.config) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "Db", + Tags: []string{"Primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query, as well as a name. 
+ var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, Query: &structs.PreparedQuery{ - Name: "test", + Name: "somequery", Service: structs.ServiceQuery{ Service: "db", }, }, } - require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Try some variations to make sure case doesn't matter. + questions := []string{ + "primary.Db.service.consul.", + "primary.db.service.consul.", + "pRIMARY.dB.service.consul.", + "PRIMARY.dB.service.consul.", + "db.service.consul.", + "DB.service.consul.", + "Db.service.consul.", + "somequery.query.consul.", + "SomeQuery.query.consul.", + "SOMEQUERY.query.consul.", + } + + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + retry.Run(t, func(r *retry.R) { + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + r.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + r.Fatalf("question %v, empty lookup: %#v", question, in) + } + }) + } + }) + } +} + +func TestDNS_ServiceLookup_TagPeriod(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"v1.primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m1 := new(dns.Msg) + m1.SetQuestion("v1.primary2.db.service.consul.", dns.TypeSRV) + + c1 := new(dns.Client) + in, _, err := c1.Exchange(m1, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 0 { + t.Fatalf("Bad: %#v", in) + } + + m := new(dns.Msg) + m.SetQuestion("v1.primary.db.service.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err = c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "foo.node.dc1.consul." 
{ + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } +} + +func TestDNS_PreparedQueryNearIPEDNS(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + ipCoord := lib.GenerateCoordinate(1 * time.Millisecond) + serviceNodes := []struct { + name string + address string + coord *coordinate.Coordinate + }{ + {"foo1", "198.18.0.1", lib.GenerateCoordinate(1 * time.Millisecond)}, + {"foo2", "198.18.0.2", lib.GenerateCoordinate(10 * time.Millisecond)}, + {"foo3", "198.18.0.3", lib.GenerateCoordinate(30 * time.Millisecond)}, + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + added := 0 + + // Register nodes with a service + for _, cfg := range serviceNodes { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: cfg.name, + Address: cfg.address, + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + }, + } + + var out struct{} + err := a.RPC(context.Background(), "Catalog.Register", args, &out) + require.NoError(t, err) + + // Send coordinate updates + coordArgs := structs.CoordinateUpdateRequest{ + Datacenter: "dc1", + Node: cfg.name, + Coord: cfg.coord, + } + err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) + require.NoError(t, err) + + added += 1 + } + + fmt.Printf("Added %d service nodes\n", added) + + // Register a node without a service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "198.18.0.9", + } + + var out struct{} + err := a.RPC(context.Background(), "Catalog.Register", args, &out) + require.NoError(t, err) + + // Send coordinate updates for a few nodes. + coordArgs := structs.CoordinateUpdateRequest{ + Datacenter: "dc1", + Node: "bar", + Coord: ipCoord, + } + err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) + require.NoError(t, err) + } + + // Register a prepared query Near = _ip + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "some.query.we.like", + Service: structs.ServiceQuery{ + Service: "db", + Near: "_ip", + }, + }, + } + + var id string + err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id) + require.NoError(t, err) + } + retry.Run(t, func(r *retry.R) { + m := new(dns.Msg) + m.SetQuestion("some.query.we.like.query.consul.", dns.TypeA) + m.SetEdns0(4096, false) + o := new(dns.OPT) + o.Hdr.Name = "." 
+ o.Hdr.Rrtype = dns.TypeOPT + e := new(dns.EDNS0_SUBNET) + e.Code = dns.EDNS0SUBNET + e.Family = 1 + e.SourceNetmask = 32 + e.SourceScope = 0 + e.Address = net.ParseIP("198.18.0.9").To4() + o.Option = append(o.Option, e) + m.Extra = append(m.Extra, o) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + r.Fatalf("Error with call to dns.Client.Exchange: %s", err) + } + + if len(serviceNodes) != len(in.Answer) { + r.Fatalf("Expecting %d A RRs in response, Actual found was %d", len(serviceNodes), len(in.Answer)) + } + + for i, rr := range in.Answer { + if aRec, ok := rr.(*dns.A); ok { + if actual := aRec.A.String(); serviceNodes[i].address != actual { + r.Fatalf("Expecting A RR #%d = %s, Actual RR was %s", i, serviceNodes[i].address, actual) + } + } else { + r.Fatalf("DNS Answer contained a non-A RR") + } + } + }) +} + +func TestDNS_PreparedQueryNearIP(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + ipCoord := lib.GenerateCoordinate(1 * time.Millisecond) + serviceNodes := []struct { + name string + address string + coord *coordinate.Coordinate + }{ + {"foo1", "198.18.0.1", lib.GenerateCoordinate(1 * time.Millisecond)}, + {"foo2", "198.18.0.2", lib.GenerateCoordinate(10 * time.Millisecond)}, + {"foo3", "198.18.0.3", lib.GenerateCoordinate(30 * time.Millisecond)}, + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + added := 0 + + // Register nodes with a service + for _, cfg := range serviceNodes { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: cfg.name, + Address: cfg.address, + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + }, + } + + var out struct{} + err := a.RPC(context.Background(), "Catalog.Register", args, &out) + require.NoError(t, err) + + // Send coordinate updates + coordArgs := structs.CoordinateUpdateRequest{ + Datacenter: "dc1", + Node: cfg.name, + Coord: cfg.coord, + } + err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) + require.NoError(t, err) + + added += 1 + } + + fmt.Printf("Added %d service nodes\n", added) + + // Register a node without a service + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "198.18.0.9", + } + + var out struct{} + err := a.RPC(context.Background(), "Catalog.Register", args, &out) + require.NoError(t, err) + + // Send coordinate updates for a few nodes. 
+ coordArgs := structs.CoordinateUpdateRequest{ + Datacenter: "dc1", + Node: "bar", + Coord: ipCoord, + } + err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) + require.NoError(t, err) + } + + // Register a prepared query Near = _ip + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "some.query.we.like", + Service: structs.ServiceQuery{ + Service: "db", + Near: "_ip", + }, + }, + } + + var id string + err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id) + require.NoError(t, err) + } + + retry.Run(t, func(r *retry.R) { + m := new(dns.Msg) + m.SetQuestion("some.query.we.like.query.consul.", dns.TypeA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + r.Fatalf("Error with call to dns.Client.Exchange: %s", err) + } + + if len(serviceNodes) != len(in.Answer) { + r.Fatalf("Expecting %d A RRs in response, Actual found was %d", len(serviceNodes), len(in.Answer)) + } + + for i, rr := range in.Answer { + if aRec, ok := rr.(*dns.A); ok { + if actual := aRec.A.String(); serviceNodes[i].address != actual { + r.Fatalf("Expecting A RR #%d = %s, Actual RR was %s", i, serviceNodes[i].address, actual) + } + } else { + r.Fatalf("DNS Answer contained a non-A RR") + } + } + }) +} + +func TestDNS_ServiceLookup_PreparedQueryNamePeriod(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register a prepared query with a period in the name. + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "some.query.we.like", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + + var id string + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + m := new(dns.Msg) + m.SetQuestion("some.query.we.like.query.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } +} + +func TestDNS_ServiceLookup_Dedup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a single node with multiple instances of a service. 
+ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "db2", + Service: "db", + Tags: []string{"replica"}, + Port: 12345, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "db3", + Service: "db", + Tags: []string{"replica"}, + Port: 12346, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query, make sure only + // one IP is returned. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + aRec, ok := in.Answer[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + } +} + +func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a single node with multiple instances of a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "db2", + Service: "db", + Tags: []string{"replica"}, + Port: 12345, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "db3", + Service: "db", + Tags: []string{"replica"}, + Port: 12346, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. 
+  var id string
+  {
+    args := &structs.PreparedQueryRequest{
+      Datacenter: "dc1",
+      Op: structs.PreparedQueryCreate,
+      Query: &structs.PreparedQuery{
+        Name: "test",
+        Service: structs.ServiceQuery{
+          Service: "db",
+        },
+      },
+    }
+    if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+  }
+
+  // Look up the service directly and via prepared query, make sure only
+  // one IP is returned and two unique ports are returned.
+  questions := []string{
+    "db.service.consul.",
+    id + ".query.consul.",
+  }
+  for _, question := range questions {
+    m := new(dns.Msg)
+    m.SetQuestion(question, dns.TypeSRV)
+
+    c := new(dns.Client)
+    in, _, err := c.Exchange(m, a.DNSAddr())
+    if err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    if len(in.Answer) != 2 {
+      t.Fatalf("Bad: %#v", in)
+    }
+
+    srvRec, ok := in.Answer[0].(*dns.SRV)
+    if !ok {
+      t.Fatalf("Bad: %#v", in.Answer[0])
+    }
+    if srvRec.Port != 12345 && srvRec.Port != 12346 {
+      t.Fatalf("Bad: %#v", srvRec)
+    }
+    if srvRec.Target != "foo.node.dc1.consul." {
+      t.Fatalf("Bad: %#v", srvRec)
+    }
+
+    srvRec, ok = in.Answer[1].(*dns.SRV)
+    if !ok {
+      t.Fatalf("Bad: %#v", in.Answer[1])
+    }
+    if srvRec.Port != 12346 && srvRec.Port != 12345 {
+      t.Fatalf("Bad: %#v", srvRec)
+    }
+    if srvRec.Port == in.Answer[0].(*dns.SRV).Port {
+      t.Fatalf("should be a different port")
+    }
+    if srvRec.Target != "foo.node.dc1.consul." {
+      t.Fatalf("Bad: %#v", srvRec)
+    }
+
+    aRec, ok := in.Extra[0].(*dns.A)
+    if !ok {
+      t.Fatalf("Bad: %#v", in.Extra[0])
+    }
+    if aRec.Hdr.Name != "foo.node.dc1.consul." {
+      t.Fatalf("Bad: %#v", in.Extra[0])
+    }
+    if aRec.A.String() != "127.0.0.1" {
+      t.Fatalf("Bad: %#v", in.Extra[0])
+    }
+  }
+}
+
+func TestDNS_Recurse(t *testing.T) {
+  if testing.Short() {
+    t.Skip("too slow for testing.Short")
+  }
+
+  t.Parallel()
+  recursor := makeRecursor(t, dns.Msg{
+    Answer: []dns.RR{dnsA("apple.com", "1.2.3.4")},
+  })
+  defer recursor.Shutdown()
+
+  a := NewTestAgent(t, `
+    recursors = ["`+recursor.Addr+`"]
+  `)
+  defer a.Shutdown()
+  testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+  m := new(dns.Msg)
+  m.SetQuestion("apple.com.", dns.TypeANY)
+
+  c := new(dns.Client)
+  in, _, err := c.Exchange(m, a.DNSAddr())
+  if err != nil {
+    t.Fatalf("err: %v", err)
+  }
+
+  if len(in.Answer) == 0 {
+    t.Fatalf("Bad: %#v", in)
+  }
+  if in.Rcode != dns.RcodeSuccess {
+    t.Fatalf("Bad: %#v", in)
+  }
+}
+
+func TestDNS_Recurse_Truncation(t *testing.T) {
+  if testing.Short() {
+    t.Skip("too slow for testing.Short")
+  }
+
+  t.Parallel()
+
+  recursor := makeRecursor(t, dns.Msg{
+    MsgHdr: dns.MsgHdr{Truncated: true},
+    Answer: []dns.RR{dnsA("apple.com", "1.2.3.4")},
+  })
+  defer recursor.Shutdown()
+
+  a := NewTestAgent(t, `
+    recursors = ["`+recursor.Addr+`"]
+  `)
+  defer a.Shutdown()
+  testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+  m := new(dns.Msg)
+  m.SetQuestion("apple.com.", dns.TypeANY)
+
+  c := new(dns.Client)
+  in, _, err := c.Exchange(m, a.DNSAddr())
+  if err != nil {
+    t.Fatalf("err: %v", err)
+  }
+  if in.Truncated != true {
+    t.Fatalf("err: message should have been truncated %v", in)
+  }
+  if len(in.Answer) == 0 {
+    t.Fatalf("Bad: Truncated message ignored, expected some reply %#v", in)
+  }
+  if in.Rcode != dns.RcodeSuccess {
+    t.Fatalf("Bad: %#v", in)
+  }
+}
+
+func TestDNS_RecursorTimeout(t *testing.T) {
+  if testing.Short() {
+    t.Skip("too slow for testing.Short")
+  }
+
+  t.Parallel()
+  serverClientTimeout := 3 * time.Second
+  testClientTimeout := serverClientTimeout + 5*time.Second
+
+  resolverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
+  if err != nil {
+    t.Error(err)
+  }
+
+  resolver, err := net.ListenUDP("udp", resolverAddr)
+  if err != nil {
+    t.Error(err)
+  }
+  defer resolver.Close()
+
+  a := NewTestAgent(t, `
+    recursors = ["`+resolver.LocalAddr().String()+`"] // host must cause a connection|read|write timeout
+    dns_config {
+      recursor_timeout = "`+serverClientTimeout.String()+`"
+    }
+  `)
+  defer a.Shutdown()
+  testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+  m := new(dns.Msg)
+  m.SetQuestion("apple.com.", dns.TypeANY)
+
+  // This client calling the server under test must have a longer timeout than the one we set internally
+  c := &dns.Client{Timeout: testClientTimeout}
+
+  start := time.Now()
+  in, _, err := c.Exchange(m, a.DNSAddr())
+
+  duration := time.Since(start)
+
+  if err != nil {
+    t.Fatalf("err: %v", err)
+  }
+
+  if len(in.Answer) != 0 {
+    t.Fatalf("Bad: %#v", in)
+  }
+  if in.Rcode != dns.RcodeServerFailure {
+    t.Fatalf("Bad: %#v", in)
+  }
+
+  if duration < serverClientTimeout {
+    t.Fatalf("Expected the call to return after at least %f seconds but lasted only %f", serverClientTimeout.Seconds(), duration.Seconds())
+  }
+
+}
+
+func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
+  if testing.Short() {
+    t.Skip("too slow for testing.Short")
+  }
+
+  t.Parallel()
+  a := NewTestAgent(t, "")
+  defer a.Shutdown()
+  testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+  // Register nodes with health checks in various states.
+  {
+    args := &structs.RegisterRequest{
+      Datacenter: "dc1",
+      Node: "foo",
+      Address: "127.0.0.1",
+      Service: &structs.NodeService{
+        Service: "db",
+        Tags: []string{"primary"},
+        Port: 12345,
+      },
+      Check: &structs.HealthCheck{
+        CheckID: "serf",
+        Name: "serf",
+        Status: api.HealthCritical,
+      },
+    }
+
+    var out struct{}
+    if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    args2 := &structs.RegisterRequest{
+      Datacenter: "dc1",
+      Node: "bar",
+      Address: "127.0.0.2",
+      Service: &structs.NodeService{
+        Service: "db",
+        Tags: []string{"primary"},
+        Port: 12345,
+      },
+      Check: &structs.HealthCheck{
+        CheckID: "serf",
+        Name: "serf",
+        Status: api.HealthCritical,
+      },
+    }
+    if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    args3 := &structs.RegisterRequest{
+      Datacenter: "dc1",
+      Node: "bar",
+      Address: "127.0.0.2",
+      Service: &structs.NodeService{
+        Service: "db",
+        Tags: []string{"primary"},
+        Port: 12345,
+      },
+      Check: &structs.HealthCheck{
+        CheckID: "db",
+        Name: "db",
+        ServiceID: "db",
+        Status: api.HealthCritical,
+      },
+    }
+    if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    args4 := &structs.RegisterRequest{
+      Datacenter: "dc1",
+      Node: "baz",
+      Address: "127.0.0.3",
+      Service: &structs.NodeService{
+        Service: "db",
+        Tags: []string{"primary"},
+        Port: 12345,
+      },
+    }
+    if err := a.RPC(context.Background(), "Catalog.Register", args4, &out); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    args5 := &structs.RegisterRequest{
+      Datacenter: "dc1",
+      Node: "quux",
+      Address: "127.0.0.4",
+      Service: &structs.NodeService{
+        Service: "db",
+        Tags: []string{"primary"},
+        Port: 12345,
+      },
+      Check: &structs.HealthCheck{
+        CheckID: "db",
+        Name: "db",
+        ServiceID: "db",
+        Status: api.HealthWarning,
+      },
+    }
+    if err := a.RPC(context.Background(), "Catalog.Register", args5, &out); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+  }
+
+  // Register an equivalent prepared
query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Only 4 and 5 are not failing, so we should get 2 answers + if len(in.Answer) != 2 { + t.Fatalf("Bad: %#v", in) + } + + ips := make(map[string]bool) + for _, resp := range in.Answer { + aRec := resp.(*dns.A) + ips[aRec.A.String()] = true + } + + if !ips["127.0.0.3"] { + t.Fatalf("Bad: %#v should contain 127.0.0.3 (state healthy)", in) + } + if !ips["127.0.0.4"] { + t.Fatalf("Bad: %#v should contain 127.0.0.4 (state warning)", in) + } + } +} + +func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register nodes with all health checks in a critical state. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "serf", + Name: "serf", + Status: api.HealthCritical, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args2 := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "serf", + Name: "serf", + Status: api.HealthCritical, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args3 := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "db", + Name: "db", + ServiceID: "db", + Status: api.HealthCritical, + }, + } + if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. 
+ questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // All 3 are failing, so we should get 0 answers and an NXDOMAIN response + if len(in.Answer) != 0 { + t.Fatalf("Bad: %#v", in) + } + + if in.Rcode != dns.RcodeNameError { + t.Fatalf("Bad: %#v", in) + } + } +} + +func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + dns_config { + only_passing = true + } + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register nodes with health checks in various states. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "db", + Name: "db", + ServiceID: "db", + Status: api.HealthPassing, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args2 := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "db", + Name: "db", + ServiceID: "db", + Status: api.HealthWarning, + }, + } + + if err := a.RPC(context.Background(), "Catalog.Register", args2, &out); err != nil { + t.Fatalf("err: %v", err) + } + + args3 := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "baz", + Address: "127.0.0.3", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + Check: &structs.HealthCheck{ + CheckID: "db", + Name: "db", + ServiceID: "db", + Status: api.HealthCritical, + }, + } + + if err := a.RPC(context.Background(), "Catalog.Register", args3, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + OnlyPassing: true, + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Only 1 is passing, so we should only get 1 answer + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + resp := in.Answer[0] + aRec := resp.(*dns.A) + + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + } + + newCfg := *a.Config + newCfg.DNSOnlyPassing = false + err := a.reloadConfigInternal(&newCfg) + require.NoError(t, err) + + // only_passing is now false. 
we should now get two nodes + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + require.Equal(t, 2, len(in.Answer)) + ips := []string{in.Answer[0].(*dns.A).A.String(), in.Answer[1].(*dns.A).A.String()} + sort.Strings(ips) + require.Equal(t, []string{"127.0.0.1", "127.0.0.2"}, ips) +} + +func TestDNS_ServiceLookup_Randomize(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a large number of nodes. + for i := 0; i < generateNumNodes; i++ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: fmt.Sprintf("foo%d", i), + Address: fmt.Sprintf("127.0.0.%d", i+1), + Service: &structs.NodeService{ + Service: "web", + Port: 8000, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "web", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. Ensure the + // response is randomized each time. + questions := []string{ + "web.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + uniques := map[string]struct{}{} + for i := 0; i < 10; i++ { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := &dns.Client{Net: "udp"} + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Response length should be truncated and we should get + // an A record for each response. + if len(in.Answer) != defaultNumUDPResponses { + t.Fatalf("Bad: %#v", len(in.Answer)) + } + + // Collect all the names. + var names []string + for _, rec := range in.Answer { + switch v := rec.(type) { + case *dns.SRV: + names = append(names, v.Target) + case *dns.A: + names = append(names, v.A.String()) + } + } + nameS := strings.Join(names, "|") + + // Tally the results. + uniques[nameS] = struct{}{} + } + + // Give some wiggle room. Since the responses are randomized and + // there is a finite number of combinations, requiring 0 + // duplicates every test run eventually gives us failures. 
+    if len(uniques) < 2 {
+      t.Fatalf("unique response ratio too low: %d/10\n%v", len(uniques), uniques)
+    }
+  }
+}
+
+func TestBinarySearch(t *testing.T) {
+  t.Parallel()
+  msgSrc := new(dns.Msg)
+  msgSrc.Compress = true
+  msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV)
+
+  for i := 0; i < 5000; i++ {
+    target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256)
+    msgSrc.Answer = append(msgSrc.Answer, &dns.SRV{Hdr: dns.RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: dns.TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
+    msgSrc.Extra = append(msgSrc.Extra, &dns.CNAME{Hdr: dns.RR_Header{Name: target, Class: 1, Rrtype: dns.TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)})
+  }
+  for _, compress := range []bool{true, false} {
+    for idx, maxSize := range []int{12, 256, 512, 8192, 65535} {
+      t.Run(fmt.Sprintf("binarySearch %d", maxSize), func(t *testing.T) {
+        msg := new(dns.Msg)
+        msgSrc.Compress = compress
+        msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV)
+        msg.Answer = msgSrc.Answer
+        msg.Extra = msgSrc.Extra
+        msg.Ns = msgSrc.Ns
+        index := make(map[string]dns.RR, len(msg.Extra))
+        indexRRs(msg.Extra, index)
+        blen := dnsBinaryTruncate(msg, maxSize, index, true)
+        msg.Answer = msg.Answer[:blen]
+        syncExtra(index, msg)
+        predicted := msg.Len()
+        buf, err := msg.Pack()
+        if err != nil {
+          t.Error(err)
+        }
+        if predicted < len(buf) {
+          t.Fatalf("Bug in DNS library: %d != %d", predicted, len(buf))
+        }
+        if len(buf) > maxSize || (idx != 0 && len(buf) < 16) {
+          t.Fatalf("bad[%d]: %d > %d", idx, len(buf), maxSize)
+        }
+      })
+    }
+  }
+}
+
+func TestDNS_TCP_and_UDP_Truncate(t *testing.T) {
+  if testing.Short() {
+    t.Skip("too slow for testing.Short")
+  }
+
+  t.Parallel()
+  a := NewTestAgent(t, `
+    dns_config {
+      enable_truncate = true
+    }
+  `)
+  defer a.Shutdown()
+  testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+  services := []string{"normal", "truncated"}
+  for index, service := range services {
+    numServices := (index * 5000) + 2
+    var eg errgroup.Group
+    for i := 1; i < numServices; i++ {
+      j := i
+      eg.Go(func() error {
+        args := &structs.RegisterRequest{
+          Datacenter: "dc1",
+          Node: fmt.Sprintf("%s-%d.acme.com", service, j),
+          Address: fmt.Sprintf("127.%d.%d.%d", 0, (j / 255), j%255),
+          Service: &structs.NodeService{
+            Service: service,
+            Port: 8000,
+          },
+        }
+
+        var out struct{}
+        return a.RPC(context.Background(), "Catalog.Register", args, &out)
+      })
+    }
+    if err := eg.Wait(); err != nil {
+      t.Fatalf("error registering: %v", err)
+    }
+
+    // Register an equivalent prepared query.
+    var id string
+    {
+      args := &structs.PreparedQueryRequest{
+        Datacenter: "dc1",
+        Op: structs.PreparedQueryCreate,
+        Query: &structs.PreparedQuery{
+          Name: service,
+          Service: structs.ServiceQuery{
+            Service: service,
+          },
+        },
+      }
+      if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil {
+        t.Fatalf("err: %v", err)
+      }
+    }
+
+    // Look up the service directly and via prepared query. Ensure the
+    // response is truncated each time.
+ questions := []string{ + fmt.Sprintf("%s.service.consul.", service), + id + ".query.consul.", + } + protocols := []string{ + "tcp", + "udp", + } + for _, maxSize := range []uint16{8192, 65535} { + for _, qType := range []uint16{dns.TypeANY, dns.TypeA, dns.TypeSRV} { + for _, question := range questions { + for _, protocol := range protocols { + for _, compress := range []bool{true, false} { + t.Run(fmt.Sprintf("lookup %s %s (qType:=%d) compressed=%v", question, protocol, qType, compress), func(t *testing.T) { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + maxSz := maxSize + if protocol == "udp" { + maxSz = 8192 + } + m.SetEdns0(maxSz, true) + c := new(dns.Client) + c.Net = protocol + m.Compress = compress + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + // actually check if we need to have the truncate bit + resbuf, err := in.Pack() + if err != nil { + t.Fatalf("Error while packing answer: %s", err) + } + if !in.Truncated && len(resbuf) > int(maxSz) { + t.Fatalf("should have truncate bit %#v %#v", in, len(in.Answer)) + } + // Check for the truncate bit + buf, err := m.Pack() + info := fmt.Sprintf("service %s question:=%s (%s) (%d total records) sz:= %d in %v", + service, question, protocol, numServices, len(in.Answer), in) + if err != nil { + t.Fatalf("Error while packing: %v ; info:=%s", err, info) + } + if len(buf) > int(maxSz) { + t.Fatalf("len(buf) := %d > maxSz=%d for %v", len(buf), maxSz, info) + } + }) + } + } + } + } + } + } +} + +func TestDNS_ServiceLookup_Truncate(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + dns_config { + enable_truncate = true + } + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a large number of nodes. + for i := 0; i < generateNumNodes; i++ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: fmt.Sprintf("foo%d", i), + Address: fmt.Sprintf("127.0.0.%d", i+1), + Service: &structs.NodeService{ + Service: "web", + Port: 8000, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "web", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. Ensure the + // response is truncated each time. + questions := []string{ + "web.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check for the truncate bit + if !in.Truncated { + t.Fatalf("should have truncate bit") + } + } +} + +func TestDNS_ServiceLookup_LargeResponses(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + dns_config { + enable_truncate = true + } + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + longServiceName := "this-is-a-very-very-very-very-very-long-name-for-a-service" + + // Register a lot of nodes. 
+ for i := 0; i < 4; i++ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: fmt.Sprintf("foo%d", i), + Address: fmt.Sprintf("127.0.0.%d", i+1), + Service: &structs.NodeService{ + Service: longServiceName, + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: longServiceName, + Service: structs.ServiceQuery{ + Service: longServiceName, + Tags: []string{"primary"}, + }, + }, + } + var id string + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. + questions := []string{ + "_" + longServiceName + "._primary.service.consul.", + longServiceName + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !in.Truncated { + t.Fatalf("should have truncate bit") + } + + // Make sure the response size is RFC 1035-compliant for UDP messages + if in.Len() > 512 { + t.Fatalf("Bad: %d", in.Len()) + } + + // We should only have two answers now + if len(in.Answer) != 2 { + t.Fatalf("Bad: %d", len(in.Answer)) + } + + // Make sure the ADDITIONAL section matches the ANSWER section. + if len(in.Answer) != len(in.Extra) { + t.Fatalf("Bad: %d vs. %d", len(in.Answer), len(in.Extra)) + } + for i := 0; i < len(in.Answer); i++ { + srv, ok := in.Answer[i].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[i]) + } + + a, ok := in.Extra[i].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[i]) + } + + if srv.Target != a.Hdr.Name { + t.Fatalf("Bad: %#v %#v", srv, a) + } + } + + // Check for the truncate bit + if !in.Truncated { + t.Fatalf("should have truncate bit") + } + } +} + +func testDNSServiceLookupResponseLimits(t *testing.T, answerLimit int, qType uint16, + expectedService, expectedQuery, expectedQueryID int) (bool, error) { + a := NewTestAgent(t, ` + node_name = "test-node" + dns_config { + udp_answer_limit = `+fmt.Sprintf("%d", answerLimit)+` + } + `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + choices := perfectlyRandomChoices(generateNumNodes, pctNodesWithIPv6) + for i := 0; i < generateNumNodes; i++ { + nodeAddress := fmt.Sprintf("127.0.0.%d", i+1) + if choices[i] { + nodeAddress = fmt.Sprintf("fe80::%d", i+1) + } + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: fmt.Sprintf("foo%d", i), + Address: nodeAddress, + Service: &structs.NodeService{ + Service: "api-tier", + Port: 8080, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + return false, fmt.Errorf("err: %v", err) + } + } + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "api-tier", + Service: structs.ServiceQuery{ + Service: "api-tier", + }, + }, + } + + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + return false, fmt.Errorf("err: %v", err) + } + } + + // Look up the service directly and via prepared query. 
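The lookups in these tests all follow one client pattern: send the question over UDP (sometimes advertising a larger EDNS0 buffer) and fall back to TCP when the TC bit comes back set. A minimal sketch of that pattern; the address and service name are placeholder assumptions, not values from this change:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// lookupSRV asks over UDP with an EDNS0 buffer advertised and, if the
// server marks the reply truncated, retries the same question over TCP.
// addr is whatever the agent's DNS listener is; the tests use a.DNSAddr().
func lookupSRV(addr, name string) (*dns.Msg, error) {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(name), dns.TypeSRV)
	m.SetEdns0(8192, false)

	udp := &dns.Client{Net: "udp", UDPSize: 8192}
	in, _, err := udp.Exchange(m, addr)
	if err != nil {
		return nil, err
	}
	if !in.Truncated {
		return in, nil
	}
	// Truncated answer: the full record set only fits over TCP.
	tcp := &dns.Client{Net: "tcp"}
	in, _, err = tcp.Exchange(m, addr)
	return in, err
}

func main() {
	in, err := lookupSRV("127.0.0.1:8600", "api-tier.service.consul")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d answers, truncated=%v\n", len(in.Answer), in.Truncated)
}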
+ questions := []string{ + "api-tier.service.consul.", + "api-tier.query.consul.", + id + ".query.consul.", + } + for idx, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, qType) + + c := &dns.Client{Net: "udp"} + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + return false, fmt.Errorf("err: %v", err) + } + + switch idx { + case 0: + if (expectedService > 0 && len(in.Answer) != expectedService) || + (expectedService < -1 && len(in.Answer) < lib.AbsInt(expectedService)) { + return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) + } + case 1: + if (expectedQuery > 0 && len(in.Answer) != expectedQuery) || + (expectedQuery < -1 && len(in.Answer) < lib.AbsInt(expectedQuery)) { + return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) + } + case 2: + if (expectedQueryID > 0 && len(in.Answer) != expectedQueryID) || + (expectedQueryID < -1 && len(in.Answer) < lib.AbsInt(expectedQueryID)) { + return false, fmt.Errorf("%d/%d answers received for type %v for %s, sz:=%d", len(in.Answer), answerLimit, qType, question, in.Len()) + } + default: + panic("abort") + } + } + + return true, nil +} + +func checkDNSService( + t *testing.T, + generateNumNodes int, + aRecordLimit int, + qType uint16, + expectedResultsCount int, + udpSize uint16, +) { + a := NewTestAgent(t, ` + node_name = "test-node" + dns_config { + a_record_limit = `+fmt.Sprintf("%d", aRecordLimit)+` + udp_answer_limit = `+fmt.Sprintf("%d", aRecordLimit)+` + } + `) + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + choices := perfectlyRandomChoices(generateNumNodes, pctNodesWithIPv6) + for i := 0; i < generateNumNodes; i++ { + nodeAddress := fmt.Sprintf("127.0.0.%d", i+1) + if choices[i] { + nodeAddress = fmt.Sprintf("fe80::%d", i+1) + } + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: fmt.Sprintf("foo%d", i), + Address: nodeAddress, + Service: &structs.NodeService{ + Service: "api-tier", + Port: 8080, + }, + } + + var out struct{} + require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + } + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "api-tier", + Service: structs.ServiceQuery{ + Service: "api-tier", + }, + }, + } + + require.NoError(t, a.RPC(context.Background(), "PreparedQuery.Apply", args, &id)) + } + + // Look up the service directly and via prepared query. 
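The switch in testDNSServiceLookupResponseLimits above repeats the same expectation check for the service, query-name, and query-ID questions. The convention it encodes (a positive value means an exact answer count, anything below -1 means at least abs(value), and 0 or -1 means unchecked) could be captured once, roughly as in this hedged sketch; it mirrors the existing logic and is not part of the change itself:

package main

import "fmt"

// answerCountOK captures the answer-count convention: positive values are
// exact, values below -1 are lower bounds, and 0 or -1 skip the check
// (response sizes differ per platform). Illustrative consolidation only.
func answerCountOK(got, want int) bool {
	switch {
	case want > 0:
		return got == want
	case want < -1:
		return got >= -want
	default:
		return true
	}
}

func main() {
	fmt.Println(answerCountOK(3, 3))  // true: exact match
	fmt.Println(answerCountOK(4, -5)) // false: fewer than 5
	fmt.Println(answerCountOK(9, -5)) // true: at least 5
	fmt.Println(answerCountOK(7, 0))  // true: unchecked
}

With such a helper, the three cases of the switch reduce to a single answerCountOK(len(in.Answer), expected) guard keyed off the question index.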
+ questions := []string{ + "api-tier.service.consul.", + "api-tier.query.consul.", + id + ".query.consul.", + } + for _, question := range questions { + question := question + t.Run("question: "+question, func(t *testing.T) { + + m := new(dns.Msg) + + m.SetQuestion(question, qType) + protocol := "tcp" + if udpSize > 0 { + protocol = "udp" + } + if udpSize > 512 { + m.SetEdns0(udpSize, true) + } + c := &dns.Client{Net: protocol, UDPSize: 8192} + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + + t.Logf("DNS Response for %+v - %+v", m, in) + + require.Equal(t, expectedResultsCount, len(in.Answer), + "%d/%d answers received for type %v for %s (%s)", len(in.Answer), expectedResultsCount, qType, question, protocol) + }) + } +} + +func TestDNS_ServiceLookup_ARecordLimits(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + tests := []struct { + name string + aRecordLimit int + expectedAResults int + expectedAAAAResults int + expectedANYResults int + expectedSRVResults int + numNodesTotal int + udpSize uint16 + _unused_udpAnswerLimit int // NOTE: this field is not used + }{ + // UDP + EDNS + {"udp-edns-1", 1, 1, 1, 1, 30, 30, 8192, 3}, + {"udp-edns-2", 2, 2, 2, 2, 30, 30, 8192, 3}, + {"udp-edns-3", 3, 3, 3, 3, 30, 30, 8192, 3}, + {"udp-edns-4", 4, 4, 4, 4, 30, 30, 8192, 3}, + {"udp-edns-5", 5, 5, 5, 5, 30, 30, 8192, 3}, + {"udp-edns-6", 6, 6, 6, 6, 30, 30, 8192, 3}, + {"udp-edns-max", 6, 2, 1, 3, 3, 3, 8192, 3}, + // All UDP without EDNS have a limit of 2 answers due to udpAnswerLimit + // Even SRV records are limit to 2 records + {"udp-limit-1", 1, 1, 0, 1, 1, 1, 512, 2}, + {"udp-limit-2", 2, 1, 1, 2, 2, 2, 512, 2}, + // AAAA results limited by size of payload + {"udp-limit-3", 3, 1, 1, 2, 2, 2, 512, 2}, + {"udp-limit-4", 4, 1, 1, 2, 2, 2, 512, 2}, + {"udp-limit-5", 5, 1, 1, 2, 2, 2, 512, 2}, + {"udp-limit-6", 6, 1, 1, 2, 2, 2, 512, 2}, + {"udp-limit-max", 6, 1, 1, 2, 2, 2, 512, 2}, + // All UDP without EDNS and no udpAnswerLimit + // Size of records is limited by UDP payload + {"udp-1", 1, 1, 0, 1, 1, 1, 512, 0}, + {"udp-2", 2, 1, 1, 2, 2, 2, 512, 0}, + {"udp-3", 3, 1, 1, 2, 2, 2, 512, 0}, + {"udp-4", 4, 1, 1, 2, 2, 2, 512, 0}, + {"udp-5", 5, 1, 1, 2, 2, 2, 512, 0}, + {"udp-6", 6, 1, 1, 2, 2, 2, 512, 0}, + // Only 3 A and 3 SRV records on 512 bytes + {"udp-max", 6, 1, 1, 2, 2, 2, 512, 0}, + + {"tcp-1", 1, 1, 1, 1, 30, 30, 0, 0}, + {"tcp-2", 2, 2, 2, 2, 30, 30, 0, 0}, + {"tcp-3", 3, 3, 3, 3, 30, 30, 0, 0}, + {"tcp-4", 4, 4, 4, 4, 30, 30, 0, 0}, + {"tcp-5", 5, 5, 5, 5, 30, 30, 0, 0}, + {"tcp-6", 6, 6, 6, 6, 30, 30, 0, 0}, + {"tcp-max", 6, 1, 1, 2, 2, 2, 0, 0}, + } + for _, test := range tests { + test := test // capture loop var + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + // All those queries should have at max queriesLimited elements + + t.Run("A", func(t *testing.T) { + t.Parallel() + checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeA, test.expectedAResults, test.udpSize) + }) + + t.Run("AAAA", func(t *testing.T) { + t.Parallel() + checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeAAAA, test.expectedAAAAResults, test.udpSize) + }) + + t.Run("ANY", func(t *testing.T) { + t.Parallel() + checkDNSService(t, test.numNodesTotal, test.aRecordLimit, dns.TypeANY, test.expectedANYResults, test.udpSize) + }) + + // No limits but the size of records for SRV records, since not subject to randomization issues + t.Run("SRV", func(t *testing.T) { + t.Parallel() + checkDNSService(t, 
test.expectedSRVResults, test.aRecordLimit, dns.TypeSRV, test.numNodesTotal, test.udpSize) + }) + }) + } +} + +func TestDNS_ServiceLookup_AnswerLimits(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + // Build a matrix of config parameters (udpAnswerLimit), and the + // length of the response per query type and question. Negative + // values imply the test must return at least the abs(value) number + // of records in the answer section. This is required because, for + // example, on OS-X and Linux, the number of answers returned in a + // 512B response is different even though both platforms are x86_64 + // and using the same version of Go. + // + // TODO(sean@): Why is it not identical everywhere when using the + // same compiler? + tests := []struct { + name string + udpAnswerLimit int + expectedAService int + expectedAQuery int + expectedAQueryID int + expectedAAAAService int + expectedAAAAQuery int + expectedAAAAQueryID int + expectedANYService int + expectedANYQuery int + expectedANYQueryID int + }{ + {"0", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {"1", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + {"2", 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, + {"3", 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, + {"4", 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}, + {"5", 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, + {"6", 6, 6, 6, 6, 6, 6, 5, 6, 6, -5}, + {"7", 7, 7, 7, 6, 7, 7, 5, 7, 7, -5}, + {"8", 8, 8, 8, 6, 8, 8, 5, 8, 8, -5}, + {"9", 9, 8, 8, 6, 8, 8, 5, 8, 8, -5}, + {"20", 20, 8, 8, 6, 8, 8, 5, 8, -5, -5}, + {"30", 30, 8, 8, 6, 8, 8, 5, 8, -5, -5}, + } + for _, test := range tests { + test := test // capture loop var + t.Run(fmt.Sprintf("A lookup %v", test), func(t *testing.T) { + t.Parallel() + ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeA, test.expectedAService, test.expectedAQuery, test.expectedAQueryID) + if !ok { + t.Fatalf("Expected service A lookup %s to pass: %v", test.name, err) + } + }) + + t.Run(fmt.Sprintf("AAAA lookup %v", test), func(t *testing.T) { + t.Parallel() + ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeAAAA, test.expectedAAAAService, test.expectedAAAAQuery, test.expectedAAAAQueryID) + if !ok { + t.Fatalf("Expected service AAAA lookup %s to pass: %v", test.name, err) } + }) + + t.Run(fmt.Sprintf("ANY lookup %v", test), func(t *testing.T) { + t.Parallel() + ok, err := testDNSServiceLookupResponseLimits(t, test.udpAnswerLimit, dns.TypeANY, test.expectedANYService, test.expectedANYQuery, test.expectedANYQueryID) + if !ok { + t.Fatalf("Expected service ANY lookup %s to pass: %v", test.name, err) + } + }) + } +} + +func TestDNS_ServiceLookup_CNAME(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + recursor := makeRecursor(t, dns.Msg{ + Answer: []dns.RR{ + dnsCNAME("www.google.com", "google.com"), + dnsA("google.com", "1.2.3.4"), + }, + }) + defer recursor.Shutdown() - type testCase struct { - nodeAddress string - nodeTaggedAddresses map[string]string - serviceAddress string - serviceTaggedAddresses map[string]structs.ServiceAddress + a := NewTestAgent(t, ` + recursors = ["`+recursor.Addr+`"] + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - expectedServiceIPv4Address string - expectedServiceIPv6Address string - expectedNodeIPv4Address string - expectedNodeIPv6Address string - } + // Register a node with a name for an address. 
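What the CNAME lookups in this and the following hunks assert is a three-record chain: the service CNAME pointing at www.google.com, the recursor's CNAME to google.com, and the final A record. A small sketch of that walk using the same fixture names; followChain is hypothetical and assumes the answers arrive in chain order, as they do in these tests:

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

// followChain walks the answer section as a CNAME chain starting at the
// queried name and returns the address of the A record that ends it.
func followChain(in *dns.Msg, question string) (net.IP, error) {
	name := dns.Fqdn(question)
	for _, rr := range in.Answer {
		switch rec := rr.(type) {
		case *dns.CNAME:
			if rec.Hdr.Name == name {
				name = rec.Target // follow the alias one hop
			}
		case *dns.A:
			if rec.Hdr.Name == name {
				return rec.A, nil
			}
		}
	}
	return nil, fmt.Errorf("answer for %q does not end in an A record", question)
}

func main() {
	// Build the chain these tests expect from the "search" service fixture.
	in := new(dns.Msg)
	in.Answer = []dns.RR{
		&dns.CNAME{Hdr: dns.RR_Header{Name: "search.service.consul.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET}, Target: "www.google.com."},
		&dns.CNAME{Hdr: dns.RR_Header{Name: "www.google.com.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET}, Target: "google.com."},
		&dns.A{Hdr: dns.RR_Header{Name: "google.com.", Rrtype: dns.TypeA, Class: dns.ClassINET}, A: net.ParseIP("1.2.3.4")},
	}
	ip, err := followChain(in, "search.service.consul")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 1.2.3.4
}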
+ { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "www.google.com", + Service: &structs.NodeService{ + Service: "search", + Port: 80, + }, + } - cases := map[string]testCase{ - "simple-ipv4": { - serviceAddress: "127.0.0.2", - nodeAddress: "127.0.0.1", + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } - expectedServiceIPv4Address: "127.0.0.2", - expectedServiceIPv6Address: "", - expectedNodeIPv4Address: "127.0.0.1", - expectedNodeIPv6Address: "", + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "search", }, - "simple-ipv6": { - serviceAddress: "::2", - nodeAddress: "::1", + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } - expectedServiceIPv6Address: "::2", - expectedServiceIPv4Address: "", - expectedNodeIPv6Address: "::1", - expectedNodeIPv4Address: "", - }, - "ipv4-with-tagged-ipv6": { - serviceAddress: "127.0.0.2", - nodeAddress: "127.0.0.1", + // Look up the service directly and via prepared query. + questions := []string{ + "search.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) - serviceTaggedAddresses: map[string]structs.ServiceAddress{ - structs.TaggedAddressLANIPv6: {Address: "::2"}, - }, - nodeTaggedAddresses: map[string]string{ - structs.TaggedAddressLANIPv6: "::1", - }, + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - expectedServiceIPv4Address: "127.0.0.2", - expectedServiceIPv6Address: "::2", - expectedNodeIPv4Address: "127.0.0.1", - expectedNodeIPv6Address: "::1", - }, - "ipv6-with-tagged-ipv4": { - serviceAddress: "::2", - nodeAddress: "::1", + // Service CNAME, google CNAME, google A record + if len(in.Answer) != 3 { + t.Fatalf("Bad: %#v", in) + } - serviceTaggedAddresses: map[string]structs.ServiceAddress{ - structs.TaggedAddressLANIPv4: {Address: "127.0.0.2"}, - }, - nodeTaggedAddresses: map[string]string{ - structs.TaggedAddressLANIPv4: "127.0.0.1", - }, + // Should have service CNAME + cnRec, ok := in.Answer[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if cnRec.Target != "www.google.com." { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + // Should have google CNAME + cnRec, ok = in.Answer[1].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[1]) + } + if cnRec.Target != "google.com." 
{ + t.Fatalf("Bad: %#v", in.Answer[1]) + } - expectedServiceIPv4Address: "127.0.0.2", - expectedServiceIPv6Address: "::2", - expectedNodeIPv4Address: "127.0.0.1", - expectedNodeIPv6Address: "::1", + // Check we recursively resolve + if _, ok := in.Answer[2].(*dns.A); !ok { + t.Fatalf("Bad: %#v", in.Answer[2]) + } + } +} + +func TestDNS_ServiceLookup_ServiceAddress_CNAME(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + recursor := makeRecursor(t, dns.Msg{ + Answer: []dns.RR{ + dnsCNAME("www.google.com", "google.com"), + dnsA("google.com", "1.2.3.4"), + }, + }) + defer recursor.Shutdown() + + a := NewTestAgent(t, ` + recursors = ["`+recursor.Addr+`"] + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register a node with a name for an address. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "1.2.3.4", + Service: &structs.NodeService{ + Service: "search", + Port: 80, + Address: "www.google.com", + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "search", }, - } + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } - for name, tc := range cases { - name := name - tc := tc - t.Run(name, func(t *testing.T) { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: tc.nodeAddress, - TaggedAddresses: tc.nodeTaggedAddresses, - Service: &structs.NodeService{ - Service: "db", - Address: tc.serviceAddress, - Port: 8080, - TaggedAddresses: tc.serviceTaggedAddresses, - }, - } + // Look up the service directly and via prepared query. + questions := []string{ + "search.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) - var out struct{} - require.NoError(t, a.RPC(context.Background(), "Catalog.Register", args, &out)) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - // Look up the SRV record via service and prepared query. 
- questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) - - c := new(dns.Client) - addr := a.config.DNSAddrs[0].String() - in, _, err := c.Exchange(m, addr) - require.NoError(t, err) - - if tc.expectedServiceIPv4Address != "" { - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, question, aRec.Hdr.Name) - require.Equal(t, tc.expectedServiceIPv4Address, aRec.A.String()) - } else { - require.Len(t, in.Answer, 0) - } + // Service CNAME, google CNAME, google A record + if len(in.Answer) != 3 { + t.Fatalf("Bad: %#v", in) + } - m = new(dns.Msg) - m.SetQuestion(question, dns.TypeAAAA) - - c = new(dns.Client) - addr = a.config.DNSAddrs[0].String() - in, _, err = c.Exchange(m, addr) - require.NoError(t, err) - - if tc.expectedServiceIPv6Address != "" { - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.AAAA) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, question, aRec.Hdr.Name) - require.Equal(t, tc.expectedServiceIPv6Address, aRec.AAAA.String()) - } else { - require.Len(t, in.Answer, 0) - } - } + // Should have service CNAME + cnRec, ok := in.Answer[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if cnRec.Target != "www.google.com." { + t.Fatalf("Bad: %#v", in.Answer[0]) + } - // Look up node - m := new(dns.Msg) - m.SetQuestion("foo.node.consul.", dns.TypeA) - - c := new(dns.Client) - addr := a.config.DNSAddrs[0].String() - in, _, err := c.Exchange(m, addr) - require.NoError(t, err) - - if tc.expectedNodeIPv4Address != "" { - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, "foo.node.consul.", aRec.Hdr.Name) - require.Equal(t, tc.expectedNodeIPv4Address, aRec.A.String()) - } else { - require.Len(t, in.Answer, 0) - } + // Should have google CNAME + cnRec, ok = in.Answer[1].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[1]) + } + if cnRec.Target != "google.com." 
{ + t.Fatalf("Bad: %#v", in.Answer[1]) + } - m = new(dns.Msg) - m.SetQuestion("foo.node.consul.", dns.TypeAAAA) - - c = new(dns.Client) - addr = a.config.DNSAddrs[0].String() - in, _, err = c.Exchange(m, addr) - require.NoError(t, err) - - if tc.expectedNodeIPv6Address != "" { - require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.AAAA) - require.True(t, ok, "Bad: %#v", in.Answer[0]) - require.Equal(t, "foo.node.consul.", aRec.Hdr.Name) - require.Equal(t, tc.expectedNodeIPv6Address, aRec.AAAA.String()) - } else { - require.Len(t, in.Answer, 0) - } - }) - } - }) + // Check we recursively resolve + if _, ok := in.Answer[2].(*dns.A); !ok { + t.Fatalf("Bad: %#v", in.Answer[2]) + } } } -func TestDNS_PreparedQueryNearIPEDNS(t *testing.T) { +func TestDNS_NodeLookup_TTL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - ipCoord := lib.GenerateCoordinate(1 * time.Millisecond) - serviceNodes := []struct { - name string - address string - coord *coordinate.Coordinate - }{ - {"foo1", "198.18.0.1", lib.GenerateCoordinate(1 * time.Millisecond)}, - {"foo2", "198.18.0.2", lib.GenerateCoordinate(10 * time.Millisecond)}, - {"foo3", "198.18.0.3", lib.GenerateCoordinate(30 * time.Millisecond)}, + t.Parallel() + recursor := makeRecursor(t, dns.Msg{ + Answer: []dns.RR{ + dnsCNAME("www.google.com", "google.com"), + dnsA("google.com", "1.2.3.4"), + }, + }) + defer recursor.Shutdown() + + a := NewTestAgent(t, ` + recursors = ["`+recursor.Addr+`"] + dns_config { + node_ttl = "10s" + allow_stale = true + max_stale = "1s" + } + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - added := 0 + m := new(dns.Msg) + m.SetQuestion("foo.node.consul.", dns.TypeANY) - // Register nodes with a service - for _, cfg := range serviceNodes { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: cfg.name, - Address: cfg.address, - Service: &structs.NodeService{ - Service: "db", - Port: 12345, - }, - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - var out struct{} - err := a.RPC(context.Background(), "Catalog.Register", args, &out) - require.NoError(t, err) + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - // Send coordinate updates - coordArgs := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: cfg.name, - Coord: cfg.coord, - } - err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) - require.NoError(t, err) + aRec, ok := in.Answer[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aRec.Hdr.Ttl != 10 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + // Register node with IPv6 + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "::4242:4242", + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + // Check an IPv6 record + m = new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeANY) + + in, _, err = c.Exchange(m, a.DNSAddr()) 
+ if err != nil { + t.Fatalf("err: %v", err) + } + + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + + aaaaRec, ok := in.Answer[0].(*dns.AAAA) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aaaaRec.AAAA.String() != "::4242:4242" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aaaaRec.Hdr.Ttl != 10 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + // Register node with CNAME + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "www.google.com", + } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m = new(dns.Msg) + m.SetQuestion("google.node.consul.", dns.TypeANY) + + in, _, err = c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should have the CNAME record + a few A records + if len(in.Answer) < 2 { + t.Fatalf("Bad: %#v", in) + } + + cnRec, ok := in.Answer[0].(*dns.CNAME) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if cnRec.Target != "www.google.com." { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if cnRec.Hdr.Ttl != 10 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } +} + +func TestDNS_ServiceLookup_TTL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } - added += 1 + t.Parallel() + a := NewTestAgent(t, ` + dns_config { + service_ttl = { + "d*" = "42s" + "db" = "10s" + "db*" = "66s" + "*" = "5s" } + allow_stale = true + max_stale = "1s" + } + `) + defer a.Shutdown() - fmt.Printf("Added %d service nodes\n", added) + for idx, service := range []string{"db", "dblb", "dk", "api"} { + nodeName := fmt.Sprintf("foo%d", idx) + address := fmt.Sprintf("127.0.0.%d", idx) + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: nodeName, + Address: address, + Service: &structs.NodeService{ + Service: service, + Tags: []string{"primary"}, + Port: 12345 + idx, + }, + } - // Register a node without a service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "198.18.0.9", - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } - var out struct{} - err := a.RPC(context.Background(), "Catalog.Register", args, &out) - require.NoError(t, err) + c := new(dns.Client) + expectResult := func(dnsQuery string, expectedTTL uint32) { + t.Run(dnsQuery, func(t *testing.T) { + m := new(dns.Msg) + m.SetQuestion(dnsQuery, dns.TypeSRV) - // Send coordinate updates for a few nodes. 
- coordArgs := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: "bar", - Coord: ipCoord, - } - err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) - require.NoError(t, err) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) } - // Register a prepared query Near = _ip - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "some.query.we.like", - Service: structs.ServiceQuery{ - Service: "db", - Near: "_ip", - }, + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v, len is %d", in, len(in.Answer)) + } + + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Hdr.Ttl != expectedTTL { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != expectedTTL { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + }) + } + // Should have its exact TTL + expectResult("db.service.consul.", 10) + // Should match db* + expectResult("dblb.service.consul.", 66) + // Should match d* + expectResult("dk.service.consul.", 42) + // Should match * + expectResult("api.service.consul.", 5) +} + +func TestDNS_PreparedQuery_TTL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, ` + dns_config { + service_ttl = { + "d*" = "42s" + "db" = "10s" + "db*" = "66s" + "*" = "5s" + } + allow_stale = true + max_stale = "1s" + } + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + for idx, service := range []string{"db", "dblb", "dk", "api"} { + nodeName := fmt.Sprintf("foo%d", idx) + address := fmt.Sprintf("127.0.0.%d", idx) + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: nodeName, + Address: address, + Service: &structs.NodeService{ + Service: service, + Tags: []string{"primary"}, + Port: 12345 + idx, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + // Register prepared query without TTL and with TTL + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: service, + Service: structs.ServiceQuery{ + Service: service, }, - } + }, + } - var id string - err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id) - require.NoError(t, err) + var id string + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + queryTTL := fmt.Sprintf("%s-ttl", service) + args = &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: queryTTL, + Service: structs.ServiceQuery{ + Service: service, + }, + DNS: structs.QueryDNSOptions{ + TTL: "18s", + }, + }, + } + + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } + } + + c := new(dns.Client) + expectResult := func(dnsQuery string, expectedTTL uint32) { + t.Run(dnsQuery, func(t *testing.T) { + m := new(dns.Msg) + m.SetQuestion(dnsQuery, dns.TypeSRV) + + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) } - retry.Run(t, func(r *retry.R) { - m := new(dns.Msg) - m.SetQuestion("some.query.we.like.query.consul.", dns.TypeA) - m.SetEdns0(4096, false) - o := new(dns.OPT) - o.Hdr.Name = "." 
- o.Hdr.Rrtype = dns.TypeOPT - e := new(dns.EDNS0_SUBNET) - e.Code = dns.EDNS0SUBNET - e.Family = 1 - e.SourceNetmask = 32 - e.SourceScope = 0 - e.Address = net.ParseIP("198.18.0.9").To4() - o.Option = append(o.Option, e) - m.Extra = append(m.Extra, o) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - r.Fatalf("Error with call to dns.Client.Exchange: %s", err) - } + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v, len is %d", in, len(in.Answer)) + } - if len(serviceNodes) != len(in.Answer) { - r.Fatalf("Expecting %d A RRs in response, Actual found was %d", len(serviceNodes), len(in.Answer)) - } + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Hdr.Ttl != expectedTTL { + t.Fatalf("Bad: %#v", in.Answer[0]) + } - for i, rr := range in.Answer { - if aRec, ok := rr.(*dns.A); ok { - if actual := aRec.A.String(); serviceNodes[i].address != actual { - r.Fatalf("Expecting A RR #%d = %s, Actual RR was %s", i, serviceNodes[i].address, actual) - } - } else { - r.Fatalf("DNS Answer contained a non-A RR") - } - } - }) + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != expectedTTL { + t.Fatalf("Bad: %#v", in.Extra[0]) + } }) } + + // Should have its exact TTL + expectResult("db.query.consul.", 10) + expectResult("db-ttl.query.consul.", 18) + // Should match db* + expectResult("dblb.query.consul.", 66) + expectResult("dblb-ttl.query.consul.", 18) + // Should match d* + expectResult("dk.query.consul.", 42) + expectResult("dk-ttl.query.consul.", 18) + // Should be the default value + expectResult("api.query.consul.", 5) + expectResult("api-ttl.query.consul.", 18) } -func TestDNS_PreparedQueryNearIP(t *testing.T) { +func TestDNS_PreparedQuery_Failover(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - ipCoord := lib.GenerateCoordinate(1 * time.Millisecond) - serviceNodes := []struct { - name string - address string - coord *coordinate.Coordinate - }{ - {"foo1", "198.18.0.1", lib.GenerateCoordinate(1 * time.Millisecond)}, - {"foo2", "198.18.0.2", lib.GenerateCoordinate(10 * time.Millisecond)}, - {"foo3", "198.18.0.3", lib.GenerateCoordinate(30 * time.Millisecond)}, + t.Parallel() + a1 := NewTestAgent(t, ` + datacenter = "dc1" + translate_wan_addrs = true + acl_datacenter = "" + `) + defer a1.Shutdown() + + a2 := NewTestAgent(t, ` + datacenter = "dc2" + translate_wan_addrs = true + acl_datacenter = "" + `) + defer a2.Shutdown() + + // Join WAN cluster. + addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN) + if _, err := a2.JoinWAN([]string{addr}); err != nil { + t.Fatalf("err: %v", err) } + retry.Run(t, func(r *retry.R) { + if got, want := len(a1.WANMembers()), 2; got < want { + r.Fatalf("got %d WAN members want at least %d", got, want) + } + if got, want := len(a2.WANMembers()), 2; got < want { + r.Fatalf("got %d WAN members want at least %d", got, want) + } + }) - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + // Register a remote node with a service. This is in a retry since we + // need the datacenter to have a route which takes a little more time + // beyond the join, and we don't have direct access to the router here. 
+ retry.Run(t, func(r *retry.R) { + args := &structs.RegisterRequest{ + Datacenter: "dc2", + Node: "foo", + Address: "127.0.0.1", + TaggedAddresses: map[string]string{ + "wan": "127.0.0.2", + }, + Service: &structs.NodeService{ + Service: "db", + }, + } - added := 0 + var out struct{} + if err := a2.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + r.Fatalf("err: %v", err) + } + }) - // Register nodes with a service - for _, cfg := range serviceNodes { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: cfg.name, - Address: cfg.address, - Service: &structs.NodeService{ - Service: "db", - Port: 12345, + // Register a local prepared query. + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "my-query", + Service: structs.ServiceQuery{ + Service: "db", + Failover: structs.QueryFailoverOptions{ + Datacenters: []string{"dc2"}, }, - } + }, + }, + } + var id string + if err := a1.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } - var out struct{} - err := a.RPC(context.Background(), "Catalog.Register", args, &out) - require.NoError(t, err) + // Look up the SRV record via the query. + m := new(dns.Msg) + m.SetQuestion("my-query.query.consul.", dns.TypeSRV) - // Send coordinate updates - coordArgs := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: cfg.name, - Coord: cfg.coord, - } - err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) - require.NoError(t, err) + c := new(dns.Client) + clAddr := a1.config.DNSAddrs[0] + in, _, err := c.Exchange(m, clAddr.String()) + if err != nil { + t.Fatalf("err: %v", err) + } - added += 1 - } + // Make sure we see the remote DC and that the address gets + // translated. + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } + if in.Answer[0].Header().Name != "my-query.query.consul." { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + srv, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srv.Target != "7f000002.addr.dc2.consul." { + t.Fatalf("Bad: %#v", in.Answer[0]) + } - fmt.Printf("Added %d service nodes\n", added) + a, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if a.Hdr.Name != "7f000002.addr.dc2.consul." { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if a.A.String() != "127.0.0.2" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } +} - // Register a node without a service - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "198.18.0.9", - } +func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } - var out struct{} - err := a.RPC(context.Background(), "Catalog.Register", args, &out) - require.NoError(t, err) + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Send coordinate updates for a few nodes. 
- coordArgs := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: "bar", - Coord: ipCoord, - } - err = a.RPC(context.Background(), "Coordinate.Update", &coordArgs, &out) - require.NoError(t, err) - } + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } - // Register a prepared query Near = _ip - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "some.query.we.like", - Service: structs.ServiceQuery{ - Service: "db", - Near: "_ip", - }, - }, - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - var id string - err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id) - require.NoError(t, err) - } + questions := []string{ + "_db._primary.service.dc1.consul.", + "_db._primary.service.consul.", + "_db._primary.dc1.consul.", + "_db._primary.consul.", + } - retry.Run(t, func(r *retry.R) { - m := new(dns.Msg) - m.SetQuestion("some.query.we.like.query.consul.", dns.TypeA) + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - r.Fatalf("Error with call to dns.Client.Exchange: %s", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(serviceNodes) != len(in.Answer) { - r.Fatalf("Expecting %d A RRs in response, Actual found was %d", len(serviceNodes), len(in.Answer)) - } + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - for i, rr := range in.Answer { - if aRec, ok := rr.(*dns.A); ok { - if actual := aRec.A.String(); serviceNodes[i].address != actual { - r.Fatalf("Expecting A RR #%d = %s, Actual RR was %s", i, serviceNodes[i].address, actual) - } - } else { - r.Fatalf("DNS Answer contained a non-A RR") - } - } - }) - }) + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "foo.node.dc1.consul." 
{ + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } } + } -func TestDNS_Recurse(t *testing.T) { +func TestDNS_ServiceLookup_SRV_RFC_TCP_Default(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - recursor := makeRecursor(t, dns.Msg{ - Answer: []dns.RR{dnsA("apple.com", "1.2.3.4")}, - }) - defer recursor.Shutdown() - - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - m := new(dns.Msg) - m.SetQuestion("apple.com.", dns.TypeANY) + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + // Register node + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } - if len(in.Answer) == 0 { - t.Fatalf("Bad: %#v", in) - } - if in.Rcode != dns.RcodeSuccess { - t.Fatalf("Bad: %#v", in) - } - }) + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) } -} -func TestDNS_Recurse_Truncation(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") + questions := []string{ + "_db._tcp.service.dc1.consul.", + "_db._tcp.service.consul.", + "_db._tcp.dc1.consul.", + "_db._tcp.consul.", } - recursor := makeRecursor(t, dns.Msg{ - MsgHdr: dns.MsgHdr{Truncated: true}, - Answer: []dns.RR{dnsA("apple.com", "1.2.3.4")}, - }) - defer recursor.Shutdown() + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - m := new(dns.Msg) - m.SetQuestion("apple.com.", dns.TypeANY) + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - if in.Truncated != true { - t.Fatalf("err: message should have been truncated %v", in) - } - if len(in.Answer) == 0 { - t.Fatalf("Bad: Truncated message ignored, expected some reply %#v", in) - } - if in.Rcode != dns.RcodeSuccess { - t.Fatalf("Bad: %#v", in) - } - }) + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Target != "foo.node.dc1.consul." { + t.Fatalf("Bad: %#v", srvRec) + } + if srvRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Name != "foo.node.dc1.consul." 
{ + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } + if aRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Extra[0]) + } } + } -func TestDNS_RecursorTimeout(t *testing.T) { +func TestDNS_ServiceLookup_FilterACL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - serverClientTimeout := 3 * time.Second - testClientTimeout := serverClientTimeout + 5*time.Second - - resolverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") - if err != nil { - t.Error(err) + t.Parallel() + tests := []struct { + token string + results int + }{ + {"root", 1}, + {"anonymous", 0}, } + for _, tt := range tests { + t.Run("ACLToken == "+tt.token, func(t *testing.T) { + a := NewTestAgent(t, ` + primary_datacenter = "dc1" - resolver, err := net.ListenUDP("udp", resolverAddr) - if err != nil { - t.Error(err) - } - defer resolver.Close() + acl { + enabled = true + default_policy = "deny" + down_policy = "deny" - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - recursors = ["`+resolver.LocalAddr().String()+`"] // host must cause a connection|read|write timeout - dns_config { - recursor_timeout = "`+serverClientTimeout.String()+`" - } - `+experimentsHCL) + tokens { + initial_management = "root" + default = "`+tt.token+`" + } + } + `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("apple.com.", dns.TypeANY) + // Register a service + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "foo", + Port: 12345, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - // This client calling the server under test must have a longer timeout than the one we set internally - c := &dns.Client{Timeout: testClientTimeout} + // Set up the DNS query + c := new(dns.Client) + m := new(dns.Msg) + m.SetQuestion("foo.service.consul.", dns.TypeA) - start := time.Now() in, _, err := c.Exchange(m, a.DNSAddr()) - - duration := time.Since(start) - if err != nil { t.Fatalf("err: %v", err) } - - if len(in.Answer) != 0 { - t.Fatalf("Bad: %#v", in) - } - if in.Rcode != dns.RcodeServerFailure { + if len(in.Answer) != tt.results { t.Fatalf("Bad: %#v", in) } - - if duration < serverClientTimeout { - t.Fatalf("Expected the call to return after at least %f seconds but lasted only %f", serverClientTimeout.Seconds(), duration.Seconds()) - } }) } } +func TestDNS_ServiceLookup_MetaTXT(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } -// no way to run a v2 version of this test since it is calling a private function and not -// using a test agent. 
-func TestDNS_BinarySearch(t *testing.T) { - msgSrc := new(dns.Msg) - msgSrc.Compress = true - msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV) + a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = true }`) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - for i := 0; i < 5000; i++ { - target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256) - msgSrc.Answer = append(msgSrc.Answer, &dns.SRV{Hdr: dns.RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: dns.TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msgSrc.Extra = append(msgSrc.Extra, &dns.CNAME{Hdr: dns.RR_Header{Name: target, Class: 1, Rrtype: dns.TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)}) + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) } - for _, compress := range []bool{true, false} { - for idx, maxSize := range []int{12, 256, 512, 8192, 65535} { - t.Run(fmt.Sprintf("binarySearch %d", maxSize), func(t *testing.T) { - msg := new(dns.Msg) - msgSrc.Compress = compress - msgSrc.SetQuestion("redis.service.consul.", dns.TypeSRV) - msg.Answer = msgSrc.Answer - msg.Extra = msgSrc.Extra - msg.Ns = msgSrc.Ns - index := make(map[string]dns.RR, len(msg.Extra)) - indexRRs(msg.Extra, index) - blen := dnsBinaryTruncate(msg, maxSize, index, true) - msg.Answer = msg.Answer[:blen] - syncExtra(index, msg) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Fatalf("Bug in DNS library: %d != %d", predicted, len(buf)) - } - if len(buf) > maxSize || (idx != 0 && len(buf) < 16) { - t.Fatalf("bad[%d]: %d > %d", idx, len(buf), maxSize) - } - }) - } + + wantAdditional := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, + Txt: []string{"key=value"}, + }, } + require.Equal(t, wantAdditional, in.Extra) } -func TestDNS_TCP_and_UDP_Truncate(t *testing.T) { +func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` - dns_config { - enable_truncate = true - } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + a := NewTestAgent(t, `dns_config = { enable_additional_node_meta_txt = false }`) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - services := []string{"normal", "truncated"} - for index, service := range services { - numServices := (index * 5000) + 2 - var eg errgroup.Group - for i := 1; i < numServices; i++ { - j := i - eg.Go(func() error { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: fmt.Sprintf("%s-%d.acme.com", service, j), - Address: fmt.Sprintf("127.%d.%d.%d", 0, (j / 
255), j%255), - Service: &structs.NodeService{ - Service: service, - Port: 8000, - }, - } + // Register a node with a service. + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } - var out struct{} - return a.RPC(context.Background(), "Catalog.Register", args, &out) - }) - } - if err := eg.Wait(); err != nil { - t.Fatalf("error registering: %v", err) - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: service, - Service: structs.ServiceQuery{ - Service: service, - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeSRV) - // Look up the service directly and via prepared query. Ensure the - // response is truncated each time. - questions := []string{ - fmt.Sprintf("%s.service.consul.", service), - id + ".query.consul.", - } - protocols := []string{ - "tcp", - "udp", - } - for _, maxSize := range []uint16{8192, 65535} { - for _, qType := range []uint16{dns.TypeANY, dns.TypeA, dns.TypeSRV} { - for _, question := range questions { - for _, protocol := range protocols { - for _, compress := range []bool{true, false} { - t.Run(fmt.Sprintf("lookup %s %s (qType:=%d) compressed=%v", question, protocol, qType, compress), func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) - maxSz := maxSize - if protocol == "udp" { - maxSz = 8192 - } - m.SetEdns0(maxSz, true) - c := new(dns.Client) - c.Net = protocol - m.Compress = compress - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - // actually check if we need to have the truncate bit - resbuf, err := in.Pack() - if err != nil { - t.Fatalf("Error while packing answer: %s", err) - } - if !in.Truncated && len(resbuf) > int(maxSz) { - t.Fatalf("should have truncate bit %#v %#v", in, len(in.Answer)) - } - // Check for the truncate bit - buf, err := m.Pack() - info := fmt.Sprintf("service %s question:=%s (%s) (%d total records) sz:= %d in %v", - service, question, protocol, numServices, len(in.Answer), in) - if err != nil { - t.Fatalf("Error while packing: %v ; info:=%s", err, info) - } - if len(buf) > int(maxSz) { - t.Fatalf("len(buf) := %d > maxSz=%d for %v", len(buf), maxSz, info) - } - }) - } - } - } - } - } - } - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAdditional := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, } + require.Equal(t, wantAdditional, in.Extra) } func TestDNS_AddressLookup(t *testing.T) { @@ -1619,37 +6476,32 @@ func TestDNS_AddressLookup(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer 
a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Look up the addresses - cases := map[string]string{ - "7f000001.addr.dc1.consul.": "127.0.0.1", - } - for question, answer := range cases { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) + // Look up the addresses + cases := map[string]string{ + "7f000001.addr.dc1.consul.": "127.0.0.1", + } + for question, answer := range cases { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - require.Len(t, in.Answer, 1) + require.Len(t, in.Answer, 1) - require.Equal(t, dns.TypeA, in.Answer[0].Header().Rrtype) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, aRec.A.To4().String(), answer) - require.Zero(t, aRec.Hdr.Ttl) - require.Nil(t, in.Ns) - require.Nil(t, in.Extra) - } - }) + require.Equal(t, dns.TypeA, in.Answer[0].Header().Rrtype) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, aRec.A.To4().String(), answer) + require.Zero(t, aRec.Hdr.Ttl) } } @@ -1658,33 +6510,30 @@ func TestDNS_AddressLookupANY(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Look up the addresses - cases := map[string]string{ - "7f000001.addr.dc1.consul.": "127.0.0.1", - } - for question, answer := range cases { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeANY) + // Look up the addresses + cases := map[string]string{ + "7f000001.addr.dc1.consul.": "127.0.0.1", + } + for question, answer := range cases { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeANY) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) - require.NoError(t, err) - require.Len(t, in.Answer, 1) - require.Equal(t, in.Answer[0].Header().Rrtype, dns.TypeA) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, aRec.A.To4().String(), answer) - require.Zero(t, aRec.Hdr.Ttl) + require.NoError(t, err) + require.Len(t, in.Answer, 1) + require.Equal(t, in.Answer[0].Header().Rrtype, dns.TypeA) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, aRec.A.To4().String(), answer) + require.Zero(t, aRec.Hdr.Ttl) - } - }) } } @@ -1693,34 +6542,26 @@ func TestDNS_AddressLookupInvalidType(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Look up the addresses - cases := map[string]string{ - "7f000001.addr.dc1.consul.": "", - } - for question := range cases { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) + // Look up the addresses + cases := map[string]string{ + "7f000001.addr.dc1.consul.": "", + } + for question := range cases { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - 
require.NoError(t, err) - require.Zero(t, in.Rcode) - require.Nil(t, in.Answer) - require.NotNil(t, in.Extra) - require.Len(t, in.Extra, 1) - aRecord := in.Extra[0].(*dns.A) - require.Equal(t, "7f000001.addr.dc1.consul.", aRecord.Hdr.Name) - require.Equal(t, dns.TypeA, aRecord.Hdr.Rrtype) - require.Zero(t, aRecord.Hdr.Ttl) - require.Equal(t, "127.0.0.1", aRecord.A.String()) - } - }) + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + require.NoError(t, err) + require.Zero(t, in.Rcode) + require.Nil(t, in.Answer) + require.NotNil(t, in.Extra) + require.Len(t, in.Extra, 1) } } @@ -1729,46 +6570,43 @@ func TestDNS_AddressLookupIPV6(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Look up the addresses - cases := map[string]string{ - "2607002040050808000000000000200e.addr.consul.": "2607:20:4005:808::200e", - "2607112040051808ffffffffffff200e.addr.consul.": "2607:1120:4005:1808:ffff:ffff:ffff:200e", - } - for question, answer := range cases { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeAAAA) + // Look up the addresses + cases := map[string]string{ + "2607002040050808000000000000200e.addr.consul.": "2607:20:4005:808::200e", + "2607112040051808ffffffffffff200e.addr.consul.": "2607:1120:4005:1808:ffff:ffff:ffff:200e", + } + for question, answer := range cases { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeAAAA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - if in.Answer[0].Header().Rrtype != dns.TypeAAAA { - t.Fatalf("Invalid type: %#v", in.Answer[0]) - } - aaaaRec, ok := in.Answer[0].(*dns.AAAA) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if aaaaRec.AAAA.To16().String() != answer { - t.Fatalf("Bad: %#v", aaaaRec) - } - if aaaaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - } - }) + if in.Answer[0].Header().Rrtype != dns.TypeAAAA { + t.Fatalf("Invalid type: %#v", in.Answer[0]) + } + aaaaRec, ok := in.Answer[0].(*dns.AAAA) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if aaaaRec.AAAA.To16().String() != answer { + t.Fatalf("Bad: %#v", aaaaRec) + } + if aaaaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } } } @@ -1777,32 +6615,29 @@ func TestDNS_AddressLookupIPV6InvalidType(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Look up the addresses - cases := map[string]string{ - "2607002040050808000000000000200e.addr.consul.": "2607:20:4005:808::200e", - "2607112040051808ffffffffffff200e.addr.consul.": "2607:1120:4005:1808:ffff:ffff:ffff:200e", - } - for question := range cases { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) + // Look up the addresses + cases := map[string]string{ + 
"2607002040050808000000000000200e.addr.consul.": "2607:20:4005:808::200e", + "2607112040051808ffffffffffff200e.addr.consul.": "2607:1120:4005:1808:ffff:ffff:ffff:200e", + } + for question := range cases { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if in.Answer != nil { - t.Fatalf("Bad: %#v", in) - } - } - }) + if in.Answer != nil { + t.Fatalf("Bad: %#v", in) + } } } @@ -1814,30 +6649,22 @@ func TestDNS_NonExistentDC_Server(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("consul.service.dc2.consul.", dns.TypeANY) + m := new(dns.Msg) + m.SetQuestion("consul.service.dc2.consul.", dns.TypeANY) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - require.Equal(t, dns.RcodeNameError, in.Rcode) - require.Equal(t, 0, len(in.Answer)) - require.Equal(t, 0, len(in.Extra)) - require.Equal(t, 1, len(in.Ns)) - soa := in.Ns[0].(*dns.SOA) - require.Equal(t, "consul.", soa.Hdr.Name) - require.Equal(t, "ns.consul.", soa.Ns) - require.Equal(t, "hostmaster.consul.", soa.Mbox) - }) + if in.Rcode != dns.RcodeNameError { + t.Fatalf("Expected RCode: %#v, had: %#v", dns.RcodeNameError, in.Rcode) } } @@ -1849,220 +6676,207 @@ func TestDNS_NonExistentDC_RPC(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - s := NewTestAgent(t, ` + t.Parallel() + s := NewTestAgent(t, ` node_name = "test-server" - `+experimentsHCL) + `) - defer s.Shutdown() - c := NewTestAgent(t, ` + defer s.Shutdown() + c := NewTestAgent(t, ` node_name = "test-client" bootstrap = false server = false - `+experimentsHCL) - defer c.Shutdown() + `) + defer c.Shutdown() - // Join LAN cluster - addr := fmt.Sprintf("127.0.0.1:%d", s.Config.SerfPortLAN) - _, err := c.JoinLAN([]string{addr}, nil) - require.NoError(t, err) - testrpc.WaitForTestAgent(t, c.RPC, "dc1") + // Join LAN cluster + addr := fmt.Sprintf("127.0.0.1:%d", s.Config.SerfPortLAN) + _, err := c.JoinLAN([]string{addr}, nil) + require.NoError(t, err) + testrpc.WaitForTestAgent(t, c.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("consul.service.dc2.consul.", dns.TypeANY) + m := new(dns.Msg) + m.SetQuestion("consul.service.dc2.consul.", dns.TypeANY) - d := new(dns.Client) - in, _, err := d.Exchange(m, c.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + d := new(dns.Client) + in, _, err := d.Exchange(m, c.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if in.Rcode != dns.RcodeNameError { - t.Fatalf("Expected RCode: %#v, had: %#v", dns.RcodeNameError, in.Rcode) - } - }) + if in.Rcode != dns.RcodeNameError { + t.Fatalf("Expected RCode: %#v, had: %#v", dns.RcodeNameError, in.Rcode) } } -func TestDNS_NonExistentLookup(t *testing.T) { +func TestDNS_NonExistingLookup(t *testing.T) { if testing.Short() { t.Skip("too slow for 
testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // lookup a non-existing node, we should receive a SOA - m := new(dns.Msg) - m.SetQuestion("nonexisting.consul.", dns.TypeANY) + // lookup a non-existing node, we should receive a SOA + m := new(dns.Msg) + m.SetQuestion("nonexisting.consul.", dns.TypeANY) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Ns) != 1 { - t.Fatalf("Bad: %#v %#v", in, len(in.Answer)) - } + if len(in.Ns) != 1 { + t.Fatalf("Bad: %#v %#v", in, len(in.Answer)) + } - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - }) + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) } } -func TestDNS_NonExistentLookupEmptyAorAAAA(t *testing.T) { +func TestDNS_NonExistingLookupEmptyAorAAAA(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - - // Register a v6-only service and a v4-only service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foov6", - Address: "fe80::1", - Service: &structs.NodeService{ - Service: "webv6", - Port: 8000, - }, - } + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } + // Register a v6-only service and a v4-only service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foov6", + Address: "fe80::1", + Service: &structs.NodeService{ + Service: "webv6", + Port: 8000, + }, + } - args = &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foov4", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "webv4", - Port: 8000, - }, - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } + args = &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foov4", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "webv4", + Port: 8000, + }, + } - // Register equivalent prepared queries. - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "webv4", - Service: structs.ServiceQuery{ - Service: "webv4", - }, - }, - } + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } - var id string - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } + // Register equivalent prepared queries. 
+ { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "webv4", + Service: structs.ServiceQuery{ + Service: "webv4", + }, + }, + } - args = &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "webv6", - Service: structs.ServiceQuery{ - Service: "webv6", - }, - }, - } + var id string + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } + args = &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "webv6", + Service: structs.ServiceQuery{ + Service: "webv6", + }, + }, + } - // Check for ipv6 records on ipv4-only service directly and via the - // prepared query. - questions := []string{ - "webv4.service.consul.", - "webv4.query.consul.", - } - for _, question := range questions { - t.Run(question, func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeAAAA) + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + // Check for ipv6 records on ipv4-only service directly and via the + // prepared query. + questions := []string{ + "webv4.service.consul.", + "webv4.query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeAAAA) - require.Len(t, in.Ns, 1) - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - require.Equal(t, dns.RcodeSuccess, in.Rcode) - }) - } + require.Len(t, in.Ns, 1) + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) + } - // Check for ipv4 records on ipv6-only service directly and via the - // prepared query. - questions = []string{ - "webv6.service.consul.", - "webv6.query.consul.", - } - for _, question := range questions { - t.Run(question, func(t *testing.T) { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) + require.Equal(t, dns.RcodeSuccess, in.Rcode) + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + // Check for ipv4 records on ipv6-only service directly and via the + // prepared query. 
+ questions = []string{ + "webv6.service.consul.", + "webv6.query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) - if len(in.Ns) != 1 { - t.Fatalf("Bad: %#v", in) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } + if len(in.Ns) != 1 { + t.Fatalf("Bad: %#v", in) + } - if in.Rcode != dns.RcodeSuccess { - t.Fatalf("Bad: %#v", in) - } - }) - } - }) + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + + if in.Rcode != dns.RcodeSuccess { + t.Fatalf("Bad: %#v", in) + } } } @@ -2071,96 +6885,93 @@ func TestDNS_AltDomains_Service(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` alt_domain = "test-domain." - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "test-node", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - NodeMeta: map[string]string{ - "key": "value", - }, - } + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "test-node", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + NodeMeta: map[string]string{ + "key": "value", + }, + } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } - questions := []struct { - ask string - wantDomain string - }{ - {"db.service.consul.", "test-node.node.dc1.consul."}, - {"db.service.test-domain.", "test-node.node.dc1.test-domain."}, - {"db.service.dc1.consul.", "test-node.node.dc1.consul."}, - {"db.service.dc1.test-domain.", "test-node.node.dc1.test-domain."}, - } + questions := []struct { + ask string + wantDomain string + }{ + {"db.service.consul.", "test-node.node.dc1.consul."}, + {"db.service.test-domain.", "test-node.node.dc1.test-domain."}, + {"db.service.dc1.consul.", "test-node.node.dc1.consul."}, + {"db.service.dc1.test-domain.", "test-node.node.dc1.test-domain."}, + } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeSRV) + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeSRV) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - srvRec, ok := in.Answer[0].(*dns.SRV) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } - if srvRec.Port != 12345 { - t.Fatalf("Bad: %#v", srvRec) - } - if got, want := srvRec.Target, 
question.wantDomain; got != want { - t.Fatalf("SRV target invalid, got %v want %v", got, want) - } + srvRec, ok := in.Answer[0].(*dns.SRV) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if srvRec.Port != 12345 { + t.Fatalf("Bad: %#v", srvRec) + } + if got, want := srvRec.Target, question.wantDomain; got != want { + t.Fatalf("SRV target invalid, got %v want %v", got, want) + } - aRec, ok := in.Extra[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[0]) - } + aRec, ok := in.Extra[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[0]) + } - if got, want := aRec.Hdr.Name, question.wantDomain; got != want { - t.Fatalf("A record header invalid, got %v want %v", got, want) - } + if got, want := aRec.Hdr.Name, question.wantDomain; got != want { + t.Fatalf("A record header invalid, got %v want %v", got, want) + } - if aRec.A.String() != "127.0.0.1" { - t.Fatalf("Bad: %#v", in.Extra[0]) - } + if aRec.A.String() != "127.0.0.1" { + t.Fatalf("Bad: %#v", in.Extra[0]) + } - txtRec, ok := in.Extra[1].(*dns.TXT) - if !ok { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - if got, want := txtRec.Hdr.Name, question.wantDomain; got != want { - t.Fatalf("TXT record header invalid, got %v want %v", got, want) - } - if txtRec.Txt[0] != "key=value" { - t.Fatalf("Bad: %#v", in.Extra[1]) - } - } - }) + txtRec, ok := in.Extra[1].(*dns.TXT) + if !ok { + t.Fatalf("Bad: %#v", in.Extra[1]) + } + if got, want := txtRec.Hdr.Name, question.wantDomain; got != want { + t.Fatalf("TXT record header invalid, got %v want %v", got, want) + } + if txtRec.Txt[0] != "key=value" { + t.Fatalf("Bad: %#v", in.Extra[1]) + } } } @@ -2169,50 +6980,47 @@ func TestDNS_AltDomains_SOA(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` node_name = "test-node" alt_domain = "test-domain." - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - questions := []struct { - ask string - want_domain string - }{ - {"test-node.node.consul.", "consul."}, - {"test-node.node.test-domain.", "test-domain."}, - } + questions := []struct { + ask string + want_domain string + }{ + {"test-node.node.consul.", "consul."}, + {"test-node.node.test-domain.", "test-domain."}, + } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question.ask, dns.TypeSOA) + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question.ask, dns.TypeSOA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Answer) != 1 { - t.Fatalf("Bad: %#v", in) - } + if len(in.Answer) != 1 { + t.Fatalf("Bad: %#v", in) + } - soaRec, ok := in.Answer[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } + soaRec, ok := in.Answer[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } - if got, want := soaRec.Hdr.Name, question.want_domain; got != want { - t.Fatalf("SOA name invalid, got %q want %q", got, want) - } - if got, want := soaRec.Ns, ("ns." 
+ question.want_domain); got != want { - t.Fatalf("SOA ns invalid, got %q want %q", got, want) - } - } - }) + if got, want := soaRec.Hdr.Name, question.want_domain; got != want { + t.Fatalf("SOA name invalid, got %q want %q", got, want) + } + if got, want := soaRec.Ns, ("ns." + question.want_domain); got != want { + t.Fatalf("SOA ns invalid, got %q want %q", got, want) + } } } @@ -2224,46 +7032,43 @@ func TestDNS_AltDomains_Overlap(t *testing.T) { // this tests the domain matching logic in DNSServer when encountering more // than one potential match (i.e. ambiguous match) // it should select the longer matching domain when dispatching - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` node_name = "test-node" alt_domain = "test.consul." - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - questions := []string{ - "test-node.node.consul.", - "test-node.node.test.consul.", - "test-node.node.dc1.consul.", - "test-node.node.dc1.test.consul.", - } + questions := []string{ + "test-node.node.consul.", + "test-node.node.test.consul.", + "test-node.node.dc1.consul.", + "test-node.node.dc1.test.consul.", + } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Answer) != 1 { - t.Fatalf("failed to resolve ambiguous alt domain %q: %#v", question, in) - } + if len(in.Answer) != 1 { + t.Fatalf("failed to resolve ambiguous alt domain %q: %#v", question, in) + } - aRec, ok := in.Answer[0].(*dns.A) - if !ok { - t.Fatalf("Bad: %#v", in.Answer[0]) - } + aRec, ok := in.Answer[0].(*dns.A) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } - if got, want := aRec.A.To4().String(), "127.0.0.1"; got != want { - t.Fatalf("A ip invalid, got %v want %v", got, want) - } - } - }) + if got, want := aRec.A.To4().String(), "127.0.0.1"; got != want { + t.Fatalf("A ip invalid, got %v want %v", got, want) + } } } @@ -2274,38 +7079,35 @@ func TestDNS_AltDomain_DCName_Overlap(t *testing.T) { // this tests the DC name overlap with the consul domain/alt-domain // we should get response when DC suffix is a prefix of consul alt-domain - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` datacenter = "dc-test" node_name = "test-node" alt_domain = "test.consul." 
- `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc-test") + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc-test") - questions := []string{ - "test-node.node.dc-test.consul.", - "test-node.node.dc-test.test.consul.", - } + questions := []string{ + "test-node.node.dc-test.consul.", + "test-node.node.dc-test.test.consul.", + } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeA) + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - require.Len(t, in.Answer, 1) + require.Len(t, in.Answer, 1) - aRec, ok := in.Answer[0].(*dns.A) - require.True(t, ok) - require.Equal(t, aRec.A.To4().String(), "127.0.0.1") - } - }) + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, aRec.A.To4().String(), "127.0.0.1") } } @@ -2314,54 +7116,52 @@ func TestDNS_PreparedQuery_AllowStale(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` dns_config { allow_stale = true max_stale = "1s" } - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := MockPreparedQuery{ - executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { - // Return a response that's perpetually too stale. - reply.LastContact = 2 * time.Second - return nil - }, - } + m := MockPreparedQuery{ + executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { + // Return a response that's perpetually too stale. + reply.LastContact = 2 * time.Second + return nil + }, + } - if err := a.registerEndpoint("PreparedQuery", &m); err != nil { - t.Fatalf("err: %v", err) - } + if err := a.registerEndpoint("PreparedQuery", &m); err != nil { + t.Fatalf("err: %v", err) + } - // Make sure that the lookup terminates and results in an SOA since - // the query doesn't exist. - { - m := new(dns.Msg) - m.SetQuestion("nope.query.consul.", dns.TypeSRV) + // Make sure that the lookup terminates and results in an SOA since + // the query doesn't exist. 
+ { + m := new(dns.Msg) + m.SetQuestion("nope.query.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + if len(in.Ns) != 1 { + t.Fatalf("Bad: %#v", in) + } - if len(in.Ns) != 1 { - t.Fatalf("Bad: %#v", in) - } + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) + } - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - } - }) } } @@ -2370,46 +7170,44 @@ func TestDNS_InvalidQueries(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - // Try invalid forms of queries that should hit the special invalid case - // of our query parser. - questions := []string{ - "consul.", - "node.consul.", - "service.consul.", - "query.consul.", - "foo.node.dc1.extra.more.consul.", - "foo.service.dc1.extra.more.consul.", - "foo.query.dc1.extra.more.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) + // Try invalid forms of queries that should hit the special invalid case + // of our query parser. + questions := []string{ + "consul.", + "node.consul.", + "service.consul.", + "query.consul.", + "foo.node.dc1.extra.more.consul.", + "foo.service.dc1.extra.more.consul.", + "foo.query.dc1.extra.more.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - if len(in.Ns) != 1 { - t.Fatalf("Bad: %#v", in) - } + if len(in.Ns) != 1 { + t.Fatalf("Bad: %#v", in) + } + + soaRec, ok := in.Ns[0].(*dns.SOA) + if !ok { + t.Fatalf("Bad: %#v", in.Ns[0]) + } + if soaRec.Hdr.Ttl != 0 { + t.Fatalf("Bad: %#v", in.Ns[0]) + } - soaRec, ok := in.Ns[0].(*dns.SOA) - if !ok { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - if soaRec.Hdr.Ttl != 0 { - t.Fatalf("Bad: %#v", in.Ns[0]) - } - } - }) } } @@ -2418,38 +7216,35 @@ func TestDNS_PreparedQuery_AgentSource(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := MockPreparedQuery{ - executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { - // Check that the agent inserted its self-name and datacenter to - // the RPC request body. 
- if args.Agent.Datacenter != a.Config.Datacenter || - args.Agent.Node != a.Config.NodeName { - t.Fatalf("bad: %#v", args.Agent) - } - return nil - }, + m := MockPreparedQuery{ + executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { + // Check that the agent inserted its self-name and datacenter to + // the RPC request body. + if args.Agent.Datacenter != a.Config.Datacenter || + args.Agent.Node != a.Config.NodeName { + t.Fatalf("bad: %#v", args.Agent) } + return nil + }, + } - if err := a.registerEndpoint("PreparedQuery", &m); err != nil { - t.Fatalf("err: %v", err) - } + if err := a.registerEndpoint("PreparedQuery", &m); err != nil { + t.Fatalf("err: %v", err) + } - { - m := new(dns.Msg) - m.SetQuestion("foo.query.consul.", dns.TypeSRV) + { + m := new(dns.Msg) + m.SetQuestion("foo.query.consul.", dns.TypeSRV) - c := new(dns.Client) - if _, _, err := c.Exchange(m, a.DNSAddr()); err != nil { - t.Fatalf("err: %v", err) - } - } - }) + c := new(dns.Client) + if _, _, err := c.Exchange(m, a.DNSAddr()); err != nil { + t.Fatalf("err: %v", err) + } } } @@ -2458,255 +7253,239 @@ func TestDNS_EDNS_Truncate_AgentSource(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, ` + t.Parallel() + a := NewTestAgent(t, ` dns_config { enable_truncate = true } - `+experimentsHCL) - defer a.Shutdown() - a.DNSDisableCompression(true) - testrpc.WaitForLeader(t, a.RPC, "dc1") + `) + defer a.Shutdown() + a.DNSDisableCompression(true) + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := MockPreparedQuery{ - executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { - // Check that the agent inserted its self-name and datacenter to - // the RPC request body. - if args.Agent.Datacenter != a.Config.Datacenter || - args.Agent.Node != a.Config.NodeName { - t.Fatalf("bad: %#v", args.Agent) - } - for i := 0; i < 100; i++ { - reply.Nodes = append(reply.Nodes, structs.CheckServiceNode{Node: &structs.Node{Node: "apple", Address: fmt.Sprintf("node.address:%d", i)}, Service: &structs.NodeService{Service: "appleService", Address: fmt.Sprintf("service.address:%d", i)}}) - } - return nil - }, + m := MockPreparedQuery{ + executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { + // Check that the agent inserted its self-name and datacenter to + // the RPC request body. 
+ if args.Agent.Datacenter != a.Config.Datacenter || + args.Agent.Node != a.Config.NodeName { + t.Fatalf("bad: %#v", args.Agent) } - - if err := a.registerEndpoint("PreparedQuery", &m); err != nil { - t.Fatalf("err: %v", err) + for i := 0; i < 100; i++ { + reply.Nodes = append(reply.Nodes, structs.CheckServiceNode{Node: &structs.Node{Node: "apple", Address: fmt.Sprintf("node.address:%d", i)}, Service: &structs.NodeService{Service: "appleService", Address: fmt.Sprintf("service.address:%d", i)}}) } + return nil + }, + } - req := new(dns.Msg) - req.SetQuestion("foo.query.consul.", dns.TypeSRV) - req.SetEdns0(2048, true) - req.Compress = false - - c := new(dns.Client) - resp, _, err := c.Exchange(req, a.DNSAddr()) - require.NoError(t, err) - require.True(t, resp.Len() < 2048) - }) + if err := a.registerEndpoint("PreparedQuery", &m); err != nil { + t.Fatalf("err: %v", err) } + + req := new(dns.Msg) + req.SetQuestion("foo.query.consul.", dns.TypeSRV) + req.SetEdns0(2048, true) + req.Compress = false + + c := new(dns.Client) + resp, _, err := c.Exchange(req, a.DNSAddr()) + require.NoError(t, err) + require.True(t, resp.Len() < 2048) } func TestDNS_trimUDPResponse_NoTrim(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - req := &dns.Msg{} - resp := &dns.Msg{ - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: "ip-10-0-1-185.node.dc1.consul.", - }, + t.Parallel() + req := &dns.Msg{} + resp := &dns.Msg{ + Answer: []dns.RR{ + &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-185.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.185"), - }, + Target: "ip-10-0-1-185.node.dc1.consul.", + }, + }, + Extra: []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{ + Name: "ip-10-0-1-185.node.dc1.consul.", + Rrtype: dns.TypeA, + Class: dns.ClassINET, }, - } + A: net.ParseIP("10.0.1.185"), + }, + }, + } - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); trimmed { - t.Fatalf("Bad %#v", *resp) - } + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); trimmed { + t.Fatalf("Bad %#v", *resp) + } - expected := &dns.Msg{ - Answer: []dns.RR{ - &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: "ip-10-0-1-185.node.dc1.consul.", - }, + expected := &dns.Msg{ + Answer: []dns.RR{ + &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, }, - Extra: []dns.RR{ - &dns.A{ - Hdr: dns.RR_Header{ - Name: "ip-10-0-1-185.node.dc1.consul.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP("10.0.1.185"), - }, + Target: "ip-10-0-1-185.node.dc1.consul.", + }, + }, + Extra: []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{ + Name: "ip-10-0-1-185.node.dc1.consul.", + Rrtype: dns.TypeA, + Class: dns.ClassINET, }, - } - if !reflect.DeepEqual(resp, expected) { - t.Fatalf("Bad %#v vs. 
%#v", *resp, *expected) - } - }) + A: net.ParseIP("10.0.1.185"), + }, + }, + } + if !reflect.DeepEqual(resp, expected) { + t.Fatalf("Bad %#v vs. %#v", *resp, *expected) } } func TestDNS_trimUDPResponse_TrimLimit(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - - req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} - for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), - } + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + + req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} + for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), + } - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - if i < cfg.DNSUDPAnswerLimit { - expected.Answer = append(expected.Answer, srv) - expected.Extra = append(expected.Extra, a) - } - } + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + if i < cfg.DNSUDPAnswerLimit { + expected.Answer = append(expected.Answer, srv) + expected.Extra = append(expected.Extra, a) + } + } - if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { - t.Fatalf("Bad %#v", *resp) - } - if !reflect.DeepEqual(resp, expected) { - t.Fatalf("Bad %#v vs. %#v", *resp, *expected) - } - }) + if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { + t.Fatalf("Bad %#v", *resp) + } + if !reflect.DeepEqual(resp, expected) { + t.Fatalf("Bad %#v vs. 
%#v", *resp, *expected) } } func TestDNS_trimUDPResponse_TrimLimitWithNS(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - - req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} - for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), - } - ns := &dns.SOA{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - }, - Ns: fmt.Sprintf("soa-%d", i), - } + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + + req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} + for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), + } + ns := &dns.SOA{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeSOA, + Class: dns.ClassINET, + }, + Ns: fmt.Sprintf("soa-%d", i), + } - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - resp.Ns = append(resp.Ns, ns) - if i < cfg.DNSUDPAnswerLimit { - expected.Answer = append(expected.Answer, srv) - expected.Extra = append(expected.Extra, a) - } - } + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + resp.Ns = append(resp.Ns, ns) + if i < cfg.DNSUDPAnswerLimit { + expected.Answer = append(expected.Answer, srv) + expected.Extra = append(expected.Extra, a) + } + } - if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { - t.Fatalf("Bad %#v", *resp) - } - require.LessOrEqual(t, resp.Len(), defaultMaxUDPSize) - require.Len(t, resp.Ns, 0) - }) + if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { + t.Fatalf("Bad %#v", *resp) } + require.LessOrEqual(t, resp.Len(), defaultMaxUDPSize) + require.Len(t, resp.Ns, 0) } func TestDNS_trimTCPResponse_TrimLimitWithNS(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - - req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} - for i := 0; i < 5000; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), - } - ns := &dns.SOA{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - }, - Ns: fmt.Sprintf("soa-%d", i), - } + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = 
"test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - resp.Ns = append(resp.Ns, ns) - if i < cfg.DNSUDPAnswerLimit { - expected.Answer = append(expected.Answer, srv) - expected.Extra = append(expected.Extra, a) - } - } - req.Question = append(req.Question, dns.Question{Qtype: dns.TypeSRV}) + req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} + for i := 0; i < 5000; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), + } + ns := &dns.SOA{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeSOA, + Class: dns.ClassINET, + }, + Ns: fmt.Sprintf("soa-%d", i), + } - if trimmed := trimTCPResponse(req, resp); !trimmed { - t.Fatalf("Bad %#v", *resp) - } - require.LessOrEqual(t, resp.Len(), 65523) - require.Len(t, resp.Ns, 0) - }) + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + resp.Ns = append(resp.Ns, ns) + if i < cfg.DNSUDPAnswerLimit { + expected.Answer = append(expected.Answer, srv) + expected.Extra = append(expected.Extra, a) + } + } + req.Question = append(req.Question, dns.Question{Qtype: dns.TypeSRV}) + + if trimmed := trimTCPResponse(req, resp); !trimmed { + t.Fatalf("Bad %#v", *resp) } + require.LessOrEqual(t, resp.Len(), 65523) + require.Len(t, resp.Ns, 0) } func loadRuntimeConfig(t *testing.T, hcl string) *config.RuntimeConfig { @@ -2718,197 +7497,188 @@ func loadRuntimeConfig(t *testing.T, hcl string) *config.RuntimeConfig { } func TestDNS_trimUDPResponse_TrimSize(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - - req, resp := &dns.Msg{}, &dns.Msg{} - for i := 0; i < 100; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), - } + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + + req, resp := &dns.Msg{}, &dns.Msg{} + for i := 0; i < 100; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 185+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 185+i)), + } - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - } + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + } - // We don't know the exact trim, but we know the resulting answer - // data should match its extra data. 
- if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { - t.Fatalf("Bad %#v", *resp) - } - if len(resp.Answer) == 0 || len(resp.Answer) != len(resp.Extra) { - t.Fatalf("Bad %#v", *resp) - } - for i := range resp.Answer { - srv, ok := resp.Answer[i].(*dns.SRV) - if !ok { - t.Fatalf("should be SRV") - } + // We don't know the exact trim, but we know the resulting answer + // data should match its extra data. + if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { + t.Fatalf("Bad %#v", *resp) + } + if len(resp.Answer) == 0 || len(resp.Answer) != len(resp.Extra) { + t.Fatalf("Bad %#v", *resp) + } + for i := range resp.Answer { + srv, ok := resp.Answer[i].(*dns.SRV) + if !ok { + t.Fatalf("should be SRV") + } - a, ok := resp.Extra[i].(*dns.A) - if !ok { - t.Fatalf("should be A") - } + a, ok := resp.Extra[i].(*dns.A) + if !ok { + t.Fatalf("should be A") + } - if srv.Target != a.Header().Name { - t.Fatalf("Bad %#v vs. %#v", *srv, *a) - } - } - }) + if srv.Target != a.Header().Name { + t.Fatalf("Bad %#v vs. %#v", *srv, *a) + } } } func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) - - req, resp := &dns.Msg{}, &dns.Msg{} - - for i := 0; i < 100; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 150+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 150+i)), - } - - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - } - - // Copy over to a new slice since we are trimming both. - reqEDNS, respEDNS := &dns.Msg{}, &dns.Msg{} - reqEDNS.SetEdns0(2048, true) - respEDNS.Answer = append(respEDNS.Answer, resp.Answer...) - respEDNS.Extra = append(respEDNS.Extra, resp.Extra...) - - // Trim each response - if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { - t.Errorf("expected response to be trimmed: %#v", resp) - } - if trimmed := trimUDPResponse(reqEDNS, respEDNS, cfg.DNSUDPAnswerLimit); !trimmed { - t.Errorf("expected edns to be trimmed: %#v", resp) - } - - // Check answer lengths - if len(resp.Answer) == 0 || len(resp.Answer) != len(resp.Extra) { - t.Errorf("bad response answer length: %#v", resp) - } - if len(respEDNS.Answer) == 0 || len(respEDNS.Answer) != len(respEDNS.Extra) { - t.Errorf("bad edns answer length: %#v", resp) - } + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + + req, resp := &dns.Msg{}, &dns.Msg{} + + for i := 0; i < 100; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 150+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 150+i)), + } - // Due to the compression, we can't check exact equality of sizes, but we can - // make two requests and ensure that the edns one returns a larger payload - // than the non-edns0 one. 
- if len(resp.Answer) >= len(respEDNS.Answer) { - t.Errorf("expected edns have larger answer: %#v\n%#v", resp, respEDNS) - } - if len(resp.Extra) >= len(respEDNS.Extra) { - t.Errorf("expected edns have larger extra: %#v\n%#v", resp, respEDNS) - } + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + } - // Verify that the things point where they should - for i := range resp.Answer { - srv, ok := resp.Answer[i].(*dns.SRV) - if !ok { - t.Errorf("%d should be an SRV", i) - } + // Copy over to a new slice since we are trimming both. + reqEDNS, respEDNS := &dns.Msg{}, &dns.Msg{} + reqEDNS.SetEdns0(2048, true) + respEDNS.Answer = append(respEDNS.Answer, resp.Answer...) + respEDNS.Extra = append(respEDNS.Extra, resp.Extra...) - a, ok := resp.Extra[i].(*dns.A) - if !ok { - t.Errorf("%d should be an A", i) - } + // Trim each response + if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); !trimmed { + t.Errorf("expected response to be trimmed: %#v", resp) + } + if trimmed := trimUDPResponse(reqEDNS, respEDNS, cfg.DNSUDPAnswerLimit); !trimmed { + t.Errorf("expected edns to be trimmed: %#v", resp) + } - if srv.Target != a.Header().Name { - t.Errorf("%d: bad %#v vs. %#v", i, srv, a) - } - } - }) + // Check answer lengths + if len(resp.Answer) == 0 || len(resp.Answer) != len(resp.Extra) { + t.Errorf("bad response answer length: %#v", resp) + } + if len(respEDNS.Answer) == 0 || len(respEDNS.Answer) != len(respEDNS.Extra) { + t.Errorf("bad edns answer length: %#v", resp) } -} -func TestDNS_trimUDPResponse_TrimSizeMaxSize(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { + // Due to the compression, we can't check exact equality of sizes, but we can + // make two requests and ensure that the edns one returns a larger payload + // than the non-edns0 one. + if len(resp.Answer) >= len(respEDNS.Answer) { + t.Errorf("expected edns have larger answer: %#v\n%#v", resp, respEDNS) + } + if len(resp.Extra) >= len(respEDNS.Extra) { + t.Errorf("expected edns have larger extra: %#v\n%#v", resp, respEDNS) + } - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) + // Verify that the things point where they should + for i := range resp.Answer { + srv, ok := resp.Answer[i].(*dns.SRV) + if !ok { + t.Errorf("%d should be an SRV", i) + } - resp := &dns.Msg{} + a, ok := resp.Extra[i].(*dns.A) + if !ok { + t.Errorf("%d should be an A", i) + } - for i := 0; i < 600; i++ { - target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 150+i) - srv := &dns.SRV{ - Hdr: dns.RR_Header{ - Name: "redis-cache-redis.service.consul.", - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - }, - Target: target, - } - a := &dns.A{ - Hdr: dns.RR_Header{ - Name: target, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - }, - A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 150+i)), - } + if srv.Target != a.Header().Name { + t.Errorf("%d: bad %#v vs. 
%#v", i, srv, a) + } + } +} - resp.Answer = append(resp.Answer, srv) - resp.Extra = append(resp.Extra, a) - } +func TestDNS_trimUDPResponse_TrimSizeMaxSize(t *testing.T) { + t.Parallel() + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) + + resp := &dns.Msg{} + + for i := 0; i < 600; i++ { + target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 150+i) + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: "redis-cache-redis.service.consul.", + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + }, + Target: target, + } + a := &dns.A{ + Hdr: dns.RR_Header{ + Name: target, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 150+i)), + } + + resp.Answer = append(resp.Answer, srv) + resp.Extra = append(resp.Extra, a) + } - reqEDNS, respEDNS := &dns.Msg{}, &dns.Msg{} - reqEDNS.SetEdns0(math.MaxUint16, true) - respEDNS.Answer = append(respEDNS.Answer, resp.Answer...) - respEDNS.Extra = append(respEDNS.Extra, resp.Extra...) - require.Greater(t, respEDNS.Len(), math.MaxUint16) - t.Logf("length is: %v", respEDNS.Len()) + reqEDNS, respEDNS := &dns.Msg{}, &dns.Msg{} + reqEDNS.SetEdns0(math.MaxUint16, true) + respEDNS.Answer = append(respEDNS.Answer, resp.Answer...) + respEDNS.Extra = append(respEDNS.Extra, resp.Extra...) + require.Greater(t, respEDNS.Len(), math.MaxUint16) + t.Logf("length is: %v", respEDNS.Len()) - if trimmed := trimUDPResponse(reqEDNS, respEDNS, cfg.DNSUDPAnswerLimit); !trimmed { - t.Errorf("expected edns to be trimmed: %#v", resp) - } - require.Greater(t, math.MaxUint16, respEDNS.Len()) + if trimmed := trimUDPResponse(reqEDNS, respEDNS, cfg.DNSUDPAnswerLimit); !trimmed { + t.Errorf("expected edns to be trimmed: %#v", resp) + } + require.Greater(t, math.MaxUint16, respEDNS.Len()) - t.Logf("length is: %v", respEDNS.Len()) + t.Logf("length is: %v", respEDNS.Len()) - if len(respEDNS.Answer) == 0 || len(respEDNS.Answer) != len(respEDNS.Extra) { - t.Errorf("bad edns answer length: %#v", resp) - } - }) + if len(respEDNS.Answer) == 0 || len(respEDNS.Answer) != len(respEDNS.Extra) { + t.Errorf("bad edns answer length: %#v", resp) } + } func TestDNS_syncExtra(t *testing.T) { + t.Parallel() resp := &dns.Msg{ Answer: []dns.RR{ // These two are on the same host so the redundant extra @@ -3132,25 +7902,21 @@ func TestDNS_syncExtra(t *testing.T) { } func TestDNS_Compression_trimUDPResponse(t *testing.T) { - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - cfg := loadRuntimeConfig(t, `data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `+experimentsHCL) + t.Parallel() + cfg := loadRuntimeConfig(t, `data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`) - req, m := dns.Msg{}, dns.Msg{} - trimUDPResponse(&req, &m, cfg.DNSUDPAnswerLimit) - if m.Compress { - t.Fatalf("compression should be off") - } + req, m := dns.Msg{}, dns.Msg{} + trimUDPResponse(&req, &m, cfg.DNSUDPAnswerLimit) + if m.Compress { + t.Fatalf("compression should be off") + } - // The trim function temporarily turns off compression, so we need to - // make sure the setting gets restored properly. - m.Compress = true - trimUDPResponse(&req, &m, cfg.DNSUDPAnswerLimit) - if !m.Compress { - t.Fatalf("compression should be on") - } - }) + // The trim function temporarily turns off compression, so we need to + // make sure the setting gets restored properly. 
+ m.Compress = true + trimUDPResponse(&req, &m, cfg.DNSUDPAnswerLimit) + if !m.Compress { + t.Fatalf("compression should be on") } } @@ -3159,93 +7925,145 @@ func TestDNS_Compression_Query(t *testing.T) { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + // Register a node with a service. + { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"primary"}, + Port: 12345, + }, + } - // Register a node with a service. - { - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - Service: "db", - Tags: []string{"primary"}, - Port: 12345, - }, - } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + } - var out struct{} - if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - } + // Register an equivalent prepared query. + var id string + { + args := &structs.PreparedQueryRequest{ + Datacenter: "dc1", + Op: structs.PreparedQueryCreate, + Query: &structs.PreparedQuery{ + Name: "test", + Service: structs.ServiceQuery{ + Service: "db", + }, + }, + } + if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { + t.Fatalf("err: %v", err) + } + } - // Register an equivalent prepared query. - var id string - { - args := &structs.PreparedQueryRequest{ - Datacenter: "dc1", - Op: structs.PreparedQueryCreate, - Query: &structs.PreparedQuery{ - Name: "test", - Service: structs.ServiceQuery{ - Service: "db", - }, - }, - } - if err := a.RPC(context.Background(), "PreparedQuery.Apply", args, &id); err != nil { - t.Fatalf("err: %v", err) - } - } + // Look up the service directly and via prepared query. + questions := []string{ + "db.service.consul.", + id + ".query.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) - // Look up the service directly and via prepared query. - questions := []string{ - "db.service.consul.", - id + ".query.consul.", - } - for _, question := range questions { - m := new(dns.Msg) - m.SetQuestion(question, dns.TypeSRV) + conn, err := dns.Dial("udp", a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - conn, err := dns.Dial("udp", a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + // Do a manual exchange with compression on (the default). + a.DNSDisableCompression(false) + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + p := make([]byte, dns.MaxMsgSize) + compressed, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } - // Do a manual exchange with compression on (the default). - a.DNSDisableCompression(false) - if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - p := make([]byte, dns.MaxMsgSize) - compressed, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } + // Disable compression and try again. 
+ a.DNSDisableCompression(true) + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + unc, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } - // Disable compression and try again. - a.DNSDisableCompression(true) - if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - unc, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } + // We can't see the compressed status given the DNS API, so we + // just make sure the message is smaller to see if it's + // respecting the flag. + if compressed == 0 || unc == 0 || compressed >= unc { + t.Fatalf("'%s' doesn't look compressed: %d vs. %d", question, compressed, unc) + } + } +} - // We can't see the compressed status given the DNS API, so we - // just make sure the message is smaller to see if it's - // respecting the flag. - if compressed == 0 || unc == 0 || compressed >= unc { - t.Fatalf("'%s' doesn't look compressed: %d vs. %d", question, compressed, unc) - } - } - }) +func TestDNS_Compression_ReverseLookup(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") + + // Register node. + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo2", + Address: "127.0.0.2", + } + var out struct{} + if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("2.0.0.127.in-addr.arpa.", dns.TypeANY) + + conn, err := dns.Dial("udp", a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Do a manual exchange with compression on (the default). + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + p := make([]byte, dns.MaxMsgSize) + compressed, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Disable compression and try again. + a.DNSDisableCompression(true) + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + unc, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } + + // We can't see the compressed status given the DNS API, so we just make + // sure the message is smaller to see if it's respecting the flag. + if compressed == 0 || unc == 0 || compressed >= unc { + t.Fatalf("doesn't look compressed: %d vs. %d", compressed, unc) } } @@ -3254,62 +8072,83 @@ func TestDNS_Compression_Recurse(t *testing.T) { t.Skip("too slow for testing.Short") } + t.Parallel() recursor := makeRecursor(t, dns.Msg{ Answer: []dns.RR{dnsA("apple.com", "1.2.3.4")}, }) defer recursor.Shutdown() - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - - a := NewTestAgent(t, ` + a := NewTestAgent(t, ` recursors = ["`+recursor.Addr+`"] - `+experimentsHCL) - defer a.Shutdown() - testrpc.WaitForTestAgent(t, a.RPC, "dc1") + `) + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") - m := new(dns.Msg) - m.SetQuestion("apple.com.", dns.TypeANY) + m := new(dns.Msg) + m.SetQuestion("apple.com.", dns.TypeANY) - conn, err := dns.Dial("udp", a.DNSAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } + conn, err := dns.Dial("udp", a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } - // Do a manual exchange with compression on (the default). 
- if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - p := make([]byte, dns.MaxMsgSize) - compressed, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } + // Do a manual exchange with compression on (the default). + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + p := make([]byte, dns.MaxMsgSize) + compressed, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } - // Disable compression and try again. - a.DNSDisableCompression(true) - if err := conn.WriteMsg(m); err != nil { - t.Fatalf("err: %v", err) - } - unc, err := conn.Read(p) - if err != nil { - t.Fatalf("err: %v", err) - } + // Disable compression and try again. + a.DNSDisableCompression(true) + if err := conn.WriteMsg(m); err != nil { + t.Fatalf("err: %v", err) + } + unc, err := conn.Read(p) + if err != nil { + t.Fatalf("err: %v", err) + } + + // We can't see the compressed status given the DNS API, so we just make + // sure the message is smaller to see if it's respecting the flag. + if compressed == 0 || unc == 0 || compressed >= unc { + t.Fatalf("doesn't look compressed: %d vs. %d", compressed, unc) + } +} - // We can't see the compressed status given the DNS API, so we just make - // sure the message is smaller to see if it's respecting the flag. - if compressed == 0 || unc == 0 || compressed >= unc { - t.Fatalf("doesn't look compressed: %d vs. %d", compressed, unc) +func TestDNSInvalidRegex(t *testing.T) { + tests := []struct { + desc string + in string + invalid bool + }{ + {"Valid Hostname", "testnode", false}, + {"Valid Hostname", "test-node", false}, + {"Invalid Hostname with special chars", "test#$$!node", true}, + {"Invalid Hostname with special chars in the end", "testnode%^", true}, + {"Whitespace", " ", true}, + {"Only special chars", "./$", true}, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if got, want := agentdns.InvalidNameRe.MatchString(test.in), test.invalid; got != want { + t.Fatalf("Expected %v to return %v", test.in, want) } }) + } } -func TestDNS_V1ConfigReload(t *testing.T) { +func TestDNS_ConfigReload(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } + t.Parallel() + a := NewTestAgent(t, ` recursors = ["8.8.8.8:53"] dns_config = { @@ -3339,12 +8178,9 @@ func TestDNS_V1ConfigReload(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") for _, s := range a.dnsServers { - server, ok := s.(*DNSServer) - require.True(t, ok) - - cfg := server.config.Load().(*dnsConfig) + cfg := s.config.Load().(*dnsConfig) require.Equal(t, []string{"8.8.8.8:53"}, cfg.Recursors) - require.Equal(t, structs.RecursorStrategy("sequential"), cfg.RecursorStrategy) + require.Equal(t, agentdns.RecursorStrategy("sequential"), cfg.RecursorStrategy) require.False(t, cfg.AllowStale) require.Equal(t, 20*time.Second, cfg.MaxStale) require.Equal(t, 10*time.Second, cfg.NodeTTL) @@ -3389,12 +8225,9 @@ func TestDNS_V1ConfigReload(t *testing.T) { require.NoError(t, err) for _, s := range a.dnsServers { - server, ok := s.(*DNSServer) - require.True(t, ok) - - cfg := server.config.Load().(*dnsConfig) + cfg := s.config.Load().(*dnsConfig) require.Equal(t, []string{"1.1.1.1:53"}, cfg.Recursors) - require.Equal(t, structs.RecursorStrategy("random"), cfg.RecursorStrategy) + require.Equal(t, agentdns.RecursorStrategy("random"), cfg.RecursorStrategy) require.True(t, cfg.AllowStale) require.Equal(t, 21*time.Second, cfg.MaxStale) require.Equal(t, 11*time.Second, cfg.NodeTTL) @@ -3417,83 +8250,77 @@ func 
TestDNS_V1ConfigReload(t *testing.T) { require.Equal(t, uint32(30), cfg.SOAConfig.Expire) require.Equal(t, uint32(40), cfg.SOAConfig.Minttl) } - } -// TODO (v2-dns) add a test for checking the V2 DNS Server reloads the config (NET-8056) - func TestDNS_ReloadConfig_DuringQuery(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") } - for name, experimentsHCL := range getVersionHCL(true) { - t.Run(name, func(t *testing.T) { - a := NewTestAgent(t, experimentsHCL) - defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc1") - m := MockPreparedQuery{ - executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { - time.Sleep(100 * time.Millisecond) - reply.Nodes = structs.CheckServiceNodes{ - { - Node: &structs.Node{ - ID: "my_node", - Address: "127.0.0.1", - }, - Service: &structs.NodeService{ - Address: "127.0.0.1", - Port: 8080, - }, - }, - } - return nil + m := MockPreparedQuery{ + executeFn: func(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { + time.Sleep(100 * time.Millisecond) + reply.Nodes = structs.CheckServiceNodes{ + { + Node: &structs.Node{ + ID: "my_node", + Address: "127.0.0.1", + }, + Service: &structs.NodeService{ + Address: "127.0.0.1", + Port: 8080, + }, }, } + return nil + }, + } - err := a.registerEndpoint("PreparedQuery", &m) - require.NoError(t, err) + err := a.registerEndpoint("PreparedQuery", &m) + require.NoError(t, err) - { - m := new(dns.Msg) - m.SetQuestion("nope.query.consul.", dns.TypeA) + { + m := new(dns.Msg) + m.SetQuestion("nope.query.consul.", dns.TypeA) - timeout := time.NewTimer(time.Second) - res := make(chan *dns.Msg) - errs := make(chan error) + timeout := time.NewTimer(time.Second) + res := make(chan *dns.Msg) + errs := make(chan error) - go func() { - c := new(dns.Client) - in, _, err := c.Exchange(m, a.DNSAddr()) - if err != nil { - errs <- err - return - } - res <- in - }() - - time.Sleep(50 * time.Millisecond) - - // reload the config halfway through, that should not affect the ongoing query - newCfg := *a.Config - newCfg.DNSAllowStale = true - a.reloadConfigInternal(&newCfg) - - select { - case in := <-res: - require.Equal(t, "127.0.0.1", in.Answer[0].(*dns.A).A.String()) - case err := <-errs: - require.NoError(t, err) - case <-timeout.C: - require.FailNow(t, "timeout") - } + go func() { + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + errs <- err + return } - }) + res <- in + }() + + time.Sleep(50 * time.Millisecond) + + // reload the config halfway through, that should not affect the ongoing query + newCfg := *a.Config + newCfg.DNSAllowStale = true + a.reloadConfigInternal(&newCfg) + + select { + case in := <-res: + require.Equal(t, "127.0.0.1", in.Answer[0].(*dns.A).A.String()) + case err := <-errs: + require.NoError(t, err) + case <-timeout.C: + require.FailNow(t, "timeout") + } } } -func TestDNS_ECSNotGlobalError(t *testing.T) { +func TestECSNotGlobalError(t *testing.T) { t.Run("wrap nil", func(t *testing.T) { e := ecsNotGlobalError{} require.True(t, errors.Is(e, errECSNotGlobal)) @@ -3525,7 +8352,7 @@ func perfectlyRandomChoices(size int, frac float64) []bool { return out } -func TestDNS_PerfectlyRandomChoices(t *testing.T) { +func TestPerfectlyRandomChoices(t *testing.T) { count := func(got []bool) int { var x int for _, v := range got { @@ -3577,71 +8404,3 @@ func 
TestDNS_PerfectlyRandomChoices(t *testing.T) { }) } } - -type testCaseParseLocality struct { - name string - labels []string - defaultEntMeta acl.EnterpriseMeta - enterpriseDNSConfig enterpriseDNSConfig - expectedResult queryLocality - expectedOK bool -} - -func TestDNS_ParseLocality(t *testing.T) { - testCases := getTestCasesParseLocality() - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - d := &DNSServer{ - defaultEnterpriseMeta: tc.defaultEntMeta, - } - actualResult, actualOK := d.parseLocality(tc.labels, &dnsConfig{ - enterpriseDNSConfig: tc.enterpriseDNSConfig, - }) - require.Equal(t, tc.expectedOK, actualOK) - require.Equal(t, tc.expectedResult, actualResult) - - }) - } - -} - -func TestDNS_EffectiveDatacenter(t *testing.T) { - type testCase struct { - name string - queryLocality queryLocality - defaultDC string - expected string - } - testCases := []testCase{ - { - name: "return datacenter first", - queryLocality: queryLocality{ - datacenter: "test-dc", - peerOrDatacenter: "test-peer", - }, - defaultDC: "default-dc", - expected: "test-dc", - }, - { - name: "return PeerOrDatacenter second", - queryLocality: queryLocality{ - peerOrDatacenter: "test-peer", - }, - defaultDC: "default-dc", - expected: "test-peer", - }, - { - name: "return defaultDC as fallback", - queryLocality: queryLocality{}, - defaultDC: "default-dc", - expected: "default-dc", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got := tc.queryLocality.effectiveDatacenter(tc.defaultDC) - require.Equal(t, tc.expected, got) - }) - } -} diff --git a/agent/enterprise_delegate_ce.go b/agent/enterprise_delegate_ce.go index 85128b8b3c074..39ae3db7c46d2 100644 --- a/agent/enterprise_delegate_ce.go +++ b/agent/enterprise_delegate_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/envoyextensions/builtin/aws-lambda/aws_lambda.go b/agent/envoyextensions/builtin/aws-lambda/aws_lambda.go index 978fc5cf5552b..fa36d6fa500d9 100644 --- a/agent/envoyextensions/builtin/aws-lambda/aws_lambda.go +++ b/agent/envoyextensions/builtin/aws-lambda/aws_lambda.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package awslambda diff --git a/agent/envoyextensions/builtin/aws-lambda/aws_lambda_test.go b/agent/envoyextensions/builtin/aws-lambda/aws_lambda_test.go index 3dda09e317a47..26f49eef4bd16 100644 --- a/agent/envoyextensions/builtin/aws-lambda/aws_lambda_test.go +++ b/agent/envoyextensions/builtin/aws-lambda/aws_lambda_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package awslambda diff --git a/agent/envoyextensions/builtin/ext-authz/ext_authz.go b/agent/envoyextensions/builtin/ext-authz/ext_authz.go index 00e1d47640c4a..7400aef13a04c 100644 --- a/agent/envoyextensions/builtin/ext-authz/ext_authz.go +++ b/agent/envoyextensions/builtin/ext-authz/ext_authz.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package extauthz @@ -23,8 +23,6 @@ type extAuthz struct { ProxyType api.ServiceKind // InsertOptions controls how the extension inserts the filter. InsertOptions ext_cmn.InsertOptions - // ListenerType controls which listener the extension applies to. It supports "inbound" or "outbound" listeners. 
- ListenerType string // Config holds the extension configuration. Config extAuthzConfig } @@ -63,14 +61,10 @@ func (a *extAuthz) PatchClusters(cfg *ext_cmn.RuntimeConfig, c ext_cmn.ClusterMa return c, nil } -func (a *extAuthz) matchesListenerDirection(isInboundListener bool) bool { - return (!isInboundListener && a.ListenerType == "outbound") || (isInboundListener && a.ListenerType == "inbound") -} - // PatchFilters inserts an ext-authz filter into the list of network filters or the filter chain of the HTTP connection manager. func (a *extAuthz) PatchFilters(cfg *ext_cmn.RuntimeConfig, filters []*envoy_listener_v3.Filter, isInboundListener bool) ([]*envoy_listener_v3.Filter, error) { // The ext_authz extension only patches filters for inbound listeners. - if !a.matchesListenerDirection(isInboundListener) { + if !isInboundListener { return filters, nil } @@ -135,11 +129,6 @@ func (a *extAuthz) normalize() { if a.ProxyType == "" { a.ProxyType = api.ServiceKindConnectProxy } - - if a.ListenerType == "" { - a.ListenerType = "inbound" - } - a.Config.normalize() } @@ -151,10 +140,6 @@ func (a *extAuthz) validate() error { api.ServiceKindConnectProxy)) } - if a.ListenerType != "inbound" && a.ListenerType != "outbound" { - resultErr = multierror.Append(resultErr, fmt.Errorf(`unexpected ListenerType %q, supported values are "inbound" or "outbound"`, a.ListenerType)) - } - if err := a.Config.validate(); err != nil { resultErr = multierror.Append(resultErr, err) } diff --git a/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go b/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go index 6db284476dd71..88e87d7e9a8f8 100644 --- a/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go +++ b/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package extauthz diff --git a/agent/envoyextensions/builtin/ext-authz/structs.go b/agent/envoyextensions/builtin/ext-authz/structs.go index d85d6c0225875..a14cedd63a765 100644 --- a/agent/envoyextensions/builtin/ext-authz/structs.go +++ b/agent/envoyextensions/builtin/ext-authz/structs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package extauthz @@ -547,7 +547,8 @@ func (s *StringMatcher) toEnvoy() *envoy_type_matcher_v3.StringMatcher { return &envoy_type_matcher_v3.StringMatcher{ MatchPattern: &envoy_type_matcher_v3.StringMatcher_SafeRegex{ SafeRegex: &envoy_type_matcher_v3.RegexMatcher{ - Regex: s.SafeRegex, + EngineType: &envoy_type_matcher_v3.RegexMatcher_GoogleRe2{}, + Regex: s.SafeRegex, }, }, } diff --git a/agent/envoyextensions/builtin/lua/lua.go b/agent/envoyextensions/builtin/lua/lua.go index 91e912842ef41..8573252af67e5 100644 --- a/agent/envoyextensions/builtin/lua/lua.go +++ b/agent/envoyextensions/builtin/lua/lua.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package lua diff --git a/agent/envoyextensions/builtin/lua/lua_test.go b/agent/envoyextensions/builtin/lua/lua_test.go index afe65d067f433..3ea2ba716c1de 100644 --- a/agent/envoyextensions/builtin/lua/lua_test.go +++ b/agent/envoyextensions/builtin/lua/lua_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package lua diff --git a/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging.go b/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging.go deleted file mode 100644 index 2f003b5525826..0000000000000 --- a/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package otelaccesslogging - -import ( - "fmt" - - envoy_extensions_access_loggers_v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" - envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - envoy_extensions_access_loggers_otel_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/open_telemetry/v3" - "github.com/mitchellh/mapstructure" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/api" - ext_cmn "github.com/hashicorp/consul/envoyextensions/extensioncommon" - "github.com/hashicorp/go-multierror" - v1 "go.opentelemetry.io/proto/otlp/common/v1" -) - -type otelAccessLogging struct { - ext_cmn.BasicExtensionAdapter - - // ProxyType identifies the type of Envoy proxy that this extension applies to. - // The extension will only be configured for proxies that match this type and - // will be ignored for all other proxy types. - ProxyType api.ServiceKind - // ListenerType controls which listener the extension applies to. It supports "inbound" or "outbound" listeners. - ListenerType string - // Config holds the extension configuration. - Config AccessLog -} - -var _ ext_cmn.BasicExtension = (*otelAccessLogging)(nil) - -func Constructor(ext api.EnvoyExtension) (ext_cmn.EnvoyExtender, error) { - otel, err := newOTELAccessLogging(ext) - if err != nil { - return nil, err - } - return &ext_cmn.BasicEnvoyExtender{ - Extension: otel, - }, nil -} - -// CanApply indicates if the extension can be applied to the given extension runtime configuration. -func (a *otelAccessLogging) CanApply(config *ext_cmn.RuntimeConfig) bool { - return config.Kind == api.ServiceKindConnectProxy -} - -// PatchClusters modifies the cluster resources for the extension. -// -// If the extension is configured to target the OTEL service running on the local host network -// this func will insert a cluster for calling that service. It does nothing if the extension is -// configured to target an upstream service because the existing cluster for the upstream will be -// used directly by the filter. -func (a *otelAccessLogging) PatchClusters(cfg *ext_cmn.RuntimeConfig, c ext_cmn.ClusterMap) (ext_cmn.ClusterMap, error) { - cluster, err := a.Config.toEnvoyCluster(cfg) - if err != nil { - return c, err - } - if cluster != nil { - c[cluster.Name] = cluster - } - return c, nil -} - -func (a *otelAccessLogging) matchesListenerDirection(p ext_cmn.FilterPayload) bool { - isInboundListener := p.IsInbound() - return (!isInboundListener && a.ListenerType == "outbound") || (isInboundListener && a.ListenerType == "inbound") -} - -// PatchFilter adds the OTEL access log in the HTTP connection manager. -func (a *otelAccessLogging) PatchFilter(p ext_cmn.FilterPayload) (*envoy_listener_v3.Filter, bool, error) { - filter := p.Message - // Make sure filter matches extension config. 
- if !a.matchesListenerDirection(p) { - return filter, false, nil - } - - httpConnectionManager, _, err := ext_cmn.GetHTTPConnectionManager(filter) - if err != nil { - return filter, false, err - } - - accessLog, err := a.toEnvoyAccessLog(p.RuntimeConfig) - if err != nil { - return filter, false, err - } - - httpConnectionManager.AccessLog = append(httpConnectionManager.AccessLog, accessLog) - newHCM, err := ext_cmn.MakeFilter("envoy.filters.network.http_connection_manager", httpConnectionManager) - if err != nil { - return filter, false, err - } - - return newHCM, true, nil -} - -func newOTELAccessLogging(ext api.EnvoyExtension) (*otelAccessLogging, error) { - otel := &otelAccessLogging{} - if ext.Name != api.BuiltinOTELAccessLoggingExtension { - return otel, fmt.Errorf("expected extension name %q but got %q", api.BuiltinOTELAccessLoggingExtension, ext.Name) - } - if err := otel.fromArguments(ext.Arguments); err != nil { - return otel, err - } - - return otel, nil -} - -func (a *otelAccessLogging) fromArguments(args map[string]any) error { - if err := mapstructure.Decode(args, a); err != nil { - return err - } - a.normalize() - return a.validate() -} - -func (a *otelAccessLogging) toEnvoyAccessLog(cfg *ext_cmn.RuntimeConfig) (*envoy_extensions_access_loggers_v3.AccessLog, error) { - commonConfig, err := a.Config.toEnvoyCommonGrpcAccessLogConfig(cfg) - if err != nil { - return nil, err - } - - body, err := toEnvoyAnyValue(a.Config.Body) - if err != nil { - return nil, fmt.Errorf("failed to marshal Body: %w", err) - } - - attributes, err := toEnvoyKeyValueList(a.Config.Attributes) - if err != nil { - return nil, fmt.Errorf("failed to marshal Attributes: %w", err) - } - - resourceAttributes, err := toEnvoyKeyValueList(a.Config.ResourceAttributes) - if err != nil { - return nil, fmt.Errorf("failed to marshal ResourceAttributes: %w", err) - } - - otelAccessLogConfig := &envoy_extensions_access_loggers_otel_v3.OpenTelemetryAccessLogConfig{ - CommonConfig: commonConfig, - Body: body, - Attributes: attributes, - ResourceAttributes: resourceAttributes, - } - - // Marshal the struct to bytes. 
- otelAccessLogConfigBytes, err := proto.Marshal(otelAccessLogConfig) - if err != nil { - return nil, fmt.Errorf("failed to marshal OpenTelemetryAccessLogConfig: %w", err) - } - - return &envoy_extensions_access_loggers_v3.AccessLog{ - Name: "envoy.access_loggers.open_telemetry", - ConfigType: &envoy_extensions_access_loggers_v3.AccessLog_TypedConfig{ - TypedConfig: &anypb.Any{ - Value: otelAccessLogConfigBytes, - TypeUrl: "type.googleapis.com/envoy.extensions.access_loggers.open_telemetry.v3.OpenTelemetryAccessLogConfig", - }, - }, - }, nil -} - -func (a *otelAccessLogging) normalize() { - if a.ProxyType == "" { - a.ProxyType = api.ServiceKindConnectProxy - } - - if a.ListenerType == "" { - a.ListenerType = "inbound" - } - - if a.Config.LogName == "" { - a.Config.LogName = a.ListenerType - } - - a.Config.normalize() -} - -func (a *otelAccessLogging) validate() error { - var resultErr error - if a.ProxyType != api.ServiceKindConnectProxy { - resultErr = multierror.Append(resultErr, fmt.Errorf("unsupported ProxyType %q, only %q is supported", - a.ProxyType, - api.ServiceKindConnectProxy)) - } - - if a.ListenerType != "inbound" && a.ListenerType != "outbound" { - resultErr = multierror.Append(resultErr, fmt.Errorf(`unexpected ListenerType %q, supported values are "inbound" or "outbound"`, a.ListenerType)) - } - - if err := a.Config.validate(); err != nil { - resultErr = multierror.Append(resultErr, err) - } - - return resultErr -} - -func toEnvoyKeyValueList(attributes map[string]any) (*v1.KeyValueList, error) { - keyValueList := &v1.KeyValueList{} - for key, value := range attributes { - anyValue, err := toEnvoyAnyValue(value) - if err != nil { - return nil, err - } - keyValueList.Values = append(keyValueList.Values, &v1.KeyValue{ - Key: key, - Value: anyValue, - }) - } - - return keyValueList, nil -} - -func toEnvoyAnyValue(value interface{}) (*v1.AnyValue, error) { - if value == nil { - return nil, nil - } - - switch v := value.(type) { - case string: - return &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: v, - }, - }, nil - case int: - return &v1.AnyValue{ - Value: &v1.AnyValue_IntValue{ - IntValue: int64(v), - }, - }, nil - case int32: - return &v1.AnyValue{ - Value: &v1.AnyValue_IntValue{ - IntValue: int64(v), - }, - }, nil - case int64: - return &v1.AnyValue{ - Value: &v1.AnyValue_IntValue{ - IntValue: v, - }, - }, nil - case float32: - return &v1.AnyValue{ - Value: &v1.AnyValue_DoubleValue{ - DoubleValue: float64(v), - }, - }, nil - case float64: - return &v1.AnyValue{ - Value: &v1.AnyValue_DoubleValue{ - DoubleValue: v, - }, - }, nil - case bool: - return &v1.AnyValue{ - Value: &v1.AnyValue_BoolValue{ - BoolValue: v, - }, - }, nil - case []byte: - return &v1.AnyValue{ - Value: &v1.AnyValue_BytesValue{ - BytesValue: v, - }, - }, nil - default: - return nil, fmt.Errorf("unsupported type %T", v) - } -} diff --git a/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging_test.go b/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging_test.go deleted file mode 100644 index 5c6b9ffa6636c..0000000000000 --- a/agent/envoyextensions/builtin/otel-access-logging/otel_access_logging_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package otelaccesslogging - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/envoyextensions/extensioncommon" -) - -func TestConstructor(t *testing.T) { - makeArguments := func(overrides map[string]interface{}) map[string]interface{} { - m := map[string]interface{}{ - "ProxyType": "connect-proxy", - "ListenerType": "inbound", - "Config": AccessLog{ - LogName: "access.log", - GrpcService: &GrpcService{ - Target: &Target{ - Service: api.CompoundServiceName{ - Name: "otel-collector", - Namespace: "default", - Partition: "default", - }, - }, - }, - }, - } - - for k, v := range overrides { - m[k] = v - } - - return m - } - - cases := map[string]struct { - extensionName string - arguments map[string]interface{} - expected otelAccessLogging - ok bool - }{ - "with no arguments": { - arguments: nil, - ok: false, - }, - "with an invalid name": { - arguments: makeArguments(map[string]interface{}{}), - extensionName: "bad", - ok: false, - }, - "invalid proxy type": { - arguments: makeArguments(map[string]interface{}{"ProxyType": "terminating-gateway"}), - ok: false, - }, - "invalid listener": { - arguments: makeArguments(map[string]interface{}{"ListenerType": "invalid"}), - ok: false, - }, - "default proxy type": { - arguments: makeArguments(map[string]interface{}{"ProxyType": ""}), - expected: otelAccessLogging{ - ProxyType: "connect-proxy", - ListenerType: "inbound", - Config: AccessLog{ - LogName: "access.log", - GrpcService: &GrpcService{ - Target: &Target{ - Service: api.CompoundServiceName{ - Name: "otel-collector", - Namespace: "default", - Partition: "default", - }, - }, - }, - }, - }, - ok: true, - }, - } - - for n, tc := range cases { - t.Run(n, func(t *testing.T) { - - extensionName := api.BuiltinOTELAccessLoggingExtension - if tc.extensionName != "" { - extensionName = tc.extensionName - } - - svc := api.CompoundServiceName{Name: "svc"} - ext := extensioncommon.RuntimeConfig{ - ServiceName: svc, - EnvoyExtension: api.EnvoyExtension{ - Name: extensionName, - Arguments: tc.arguments, - }, - } - - e, err := Constructor(ext.EnvoyExtension) - - if tc.ok { - require.NoError(t, err) - require.Equal(t, &extensioncommon.BasicEnvoyExtender{Extension: &tc.expected}, e) - } else { - require.Error(t, err) - } - }) - } -} diff --git a/agent/envoyextensions/builtin/otel-access-logging/structs.go b/agent/envoyextensions/builtin/otel-access-logging/structs.go deleted file mode 100644 index c6078679aa741..0000000000000 --- a/agent/envoyextensions/builtin/otel-access-logging/structs.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package otelaccesslogging - -import ( - "fmt" - "strconv" - "strings" - "time" - - envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - envoy_extensions_access_loggers_grpc_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v3" - envoy_upstreams_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/api" - cmn "github.com/hashicorp/consul/envoyextensions/extensioncommon" - "github.com/hashicorp/go-multierror" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/durationpb" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - LocalAccessLogClusterName = "local_access_log" - - localhost = "localhost" - localhostIPv4 = "127.0.0.1" - localhostIPv6 = "::1" -) - -type AccessLog struct { - LogName string - GrpcService *GrpcService - BufferFlushInterval *time.Duration - BufferSizeBytes uint32 - FilterStateObjectsToLog []string - RetryPolicy *RetryPolicy - Body any - Attributes map[string]any - ResourceAttributes map[string]any -} - -func (a *AccessLog) normalize() { - if a.GrpcService != nil { - a.GrpcService.normalize() - } - - if a.RetryPolicy != nil { - a.RetryPolicy.normalize() - } -} - -func (a *AccessLog) validate() error { - a.normalize() - - if a.GrpcService == nil { - return fmt.Errorf("missing GrpcService") - } - - var resultErr error - - var field string - var validate func() error - field = "GrpcService" - validate = a.GrpcService.validate - - if err := validate(); err != nil { - resultErr = multierror.Append(resultErr, fmt.Errorf("failed to validate Config.%s: %w", field, err)) - } - - return resultErr -} - -func (a *AccessLog) envoyGrpcService(cfg *cmn.RuntimeConfig) (*envoy_core_v3.GrpcService, error) { - target := a.GrpcService.Target - clusterName, err := a.getClusterName(cfg, target) - if err != nil { - return nil, err - } - - var initialMetadata []*envoy_core_v3.HeaderValue - for _, meta := range a.GrpcService.InitialMetadata { - initialMetadata = append(initialMetadata, meta.toEnvoy()) - } - - return &envoy_core_v3.GrpcService{ - TargetSpecifier: &envoy_core_v3.GrpcService_EnvoyGrpc_{ - EnvoyGrpc: &envoy_core_v3.GrpcService_EnvoyGrpc{ - ClusterName: clusterName, - Authority: a.GrpcService.Authority, - }, - }, - Timeout: target.timeoutDurationPB(), - InitialMetadata: initialMetadata, - }, nil -} - -// getClusterName returns the name of the cluster for the OpenTelemetry access logging service. -// If the extension is configured with an upstream OpenTelemetry access logging service then the name of the cluster for -// that upstream is returned. If the extension is configured with a URI, the only allowed host is `localhost` -// and the extension will insert a new cluster with the name "local_access_log", so we use that name. -func (a *AccessLog) getClusterName(cfg *cmn.RuntimeConfig, target *Target) (string, error) { - var err error - clusterName := LocalAccessLogClusterName - if target.isService() { - if clusterName, err = target.clusterName(cfg); err != nil { - return "", err - } - } - return clusterName, nil -} - -// toEnvoyCluster returns an Envoy cluster for connecting to the OpenTelemetry access logging service. 
-// If the extension is configured with the OpenTelemetry access logging service locally via the URI set to localhost, -// this func will return a new cluster definition that will allow the proxy to connect to the OpenTelemetry access logging -// service running on localhost on the configured port. -// -// If the extension is configured with the OpenTelemetry access logging service as an upstream there is no need to insert -// a new cluster so this method returns nil. -func (a *AccessLog) toEnvoyCluster(_ *cmn.RuntimeConfig) (*envoy_cluster_v3.Cluster, error) { - target := a.GrpcService.Target - - // If the target is an upstream we do not need to create a cluster. We will use the cluster of the upstream. - if target.isService() { - return nil, nil - } - - host, port, err := target.addr() - if err != nil { - return nil, err - } - - clusterType := &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC} - if host == localhost { - // If the host is "localhost" use a STRICT_DNS cluster type to perform DNS lookup. - clusterType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STRICT_DNS} - } - - var typedExtProtoOpts map[string]*anypb.Any - - httpProtoOpts := &envoy_upstreams_http_v3.HttpProtocolOptions{ - UpstreamProtocolOptions: &envoy_upstreams_http_v3.HttpProtocolOptions_ExplicitHttpConfig_{ - ExplicitHttpConfig: &envoy_upstreams_http_v3.HttpProtocolOptions_ExplicitHttpConfig{ - ProtocolConfig: &envoy_upstreams_http_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{}, - }, - }, - } - httpProtoOptsAny, err := anypb.New(httpProtoOpts) - if err != nil { - return nil, err - } - typedExtProtoOpts = make(map[string]*anypb.Any) - typedExtProtoOpts["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] = httpProtoOptsAny - - return &envoy_cluster_v3.Cluster{ - Name: LocalAccessLogClusterName, - ClusterDiscoveryType: clusterType, - ConnectTimeout: target.timeoutDurationPB(), - LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{ - ClusterName: LocalAccessLogClusterName, - Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{ - { - LbEndpoints: []*envoy_endpoint_v3.LbEndpoint{{ - HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ - Endpoint: &envoy_endpoint_v3.Endpoint{ - Address: &envoy_core_v3.Address{ - Address: &envoy_core_v3.Address_SocketAddress{ - SocketAddress: &envoy_core_v3.SocketAddress{ - Address: host, - PortSpecifier: &envoy_core_v3.SocketAddress_PortValue{ - PortValue: uint32(port), - }, - }, - }, - }, - }, - }, - }}, - }, - }, - }, - TypedExtensionProtocolOptions: typedExtProtoOpts, - }, nil -} - -func (a *AccessLog) toEnvoyCommonGrpcAccessLogConfig(cfg *cmn.RuntimeConfig) (*envoy_extensions_access_loggers_grpc_v3.CommonGrpcAccessLogConfig, error) { - config := &envoy_extensions_access_loggers_grpc_v3.CommonGrpcAccessLogConfig{ - LogName: a.LogName, - BufferSizeBytes: wrapperspb.UInt32(a.BufferSizeBytes), - FilterStateObjectsToLog: a.FilterStateObjectsToLog, - TransportApiVersion: envoy_core_v3.ApiVersion_V3, - } - - if a.BufferFlushInterval != nil { - config.BufferFlushInterval = durationpb.New(*a.BufferFlushInterval) - } - - if a.RetryPolicy != nil { - config.GrpcStreamRetryPolicy = a.RetryPolicy.toEnvoy() - } - - grpcSvc, err := a.envoyGrpcService(cfg) - if err != nil { - return nil, err - } - config.GrpcService = grpcSvc - - return config, nil -} - -type GrpcService struct { - Target *Target - Authority string - InitialMetadata []*HeaderValue -} - -func (v *GrpcService) normalize() { - if v == nil { - return - } - 
v.Target.normalize() -} - -func (v *GrpcService) validate() error { - var resultErr error - if v == nil { - return resultErr - } - - if v.Target == nil { - resultErr = multierror.Append(resultErr, fmt.Errorf("GrpcService.Target must be set")) - } - if err := v.Target.validate(); err != nil { - resultErr = multierror.Append(resultErr, err) - } - return resultErr -} - -type HeaderValue struct { - Key string - Value string -} - -func (h *HeaderValue) toEnvoy() *envoy_core_v3.HeaderValue { - if h == nil { - return nil - } - return &envoy_core_v3.HeaderValue{Key: h.Key, Value: h.Value} -} - -type Target struct { - Service api.CompoundServiceName - URI string - Timeout string - - timeout *time.Duration - host string - port int -} - -// addr returns the host and port for the target when the target is a URI. -// It returns a non-nil error if the target is not a URI. -func (t Target) addr() (string, int, error) { - if !t.isURI() { - return "", 0, fmt.Errorf("target is not configured with a URI, set Target.URI") - } - return t.host, t.port, nil -} - -// clusterName returns the cluster name for the target when the target is an upstream service. -// It searches through the upstreams in the provided runtime configuration and returns the name -// of the cluster for the first upstream service that matches the target service. -// It returns a non-nil error if a matching cluster is not found or if the target is not an -// upstream service. -func (t Target) clusterName(cfg *cmn.RuntimeConfig) (string, error) { - if !t.isService() { - return "", fmt.Errorf("target is not configured with an upstream service, set Target.Service") - } - - for service, upstream := range cfg.Upstreams { - if service == t.Service { - for sni := range upstream.SNIs { - return sni, nil - } - } - } - return "", fmt.Errorf("no upstream definition found for service %q", t.Service.Name) -} - -func (t Target) isService() bool { - return t.Service.Name != "" -} - -func (t Target) isURI() bool { - return t.URI != "" -} - -func (t *Target) normalize() { - if t == nil { - return - } - t.Service.Namespace = acl.NamespaceOrDefault(t.Service.Namespace) - t.Service.Partition = acl.PartitionOrDefault(t.Service.Partition) -} - -// timeoutDurationPB returns the target's timeout as a *durationpb.Duration. -// It returns nil if the timeout has not been explicitly set. 
-func (t *Target) timeoutDurationPB() *durationpb.Duration { - if t == nil || t.timeout == nil { - return nil - } - return durationpb.New(*t.timeout) -} - -func (t *Target) validate() error { - var err, resultErr error - if t == nil { - return resultErr - } - - if t.isURI() == t.isService() { - resultErr = multierror.Append(resultErr, fmt.Errorf("exactly one of Target.Service or Target.URI must be set")) - } - - if t.isURI() { - t.host, t.port, err = parseAddr(t.URI) - if err == nil { - switch t.host { - case localhost, localhostIPv4, localhostIPv6: - default: - resultErr = multierror.Append(resultErr, - fmt.Errorf("invalid host for Target.URI %q: expected %q, %q, or %q", t.URI, localhost, localhostIPv4, localhostIPv6)) - } - } else { - resultErr = multierror.Append(resultErr, fmt.Errorf("invalid format for Target.URI %q: expected host:port", t.URI)) - } - } - - if t.Timeout != "" { - if d, err := time.ParseDuration(t.Timeout); err == nil { - t.timeout = &d - } else { - resultErr = multierror.Append(resultErr, fmt.Errorf("failed to parse Target.Timeout %q as a duration: %w", t.Timeout, err)) - } - } - return resultErr -} - -type RetryPolicy struct { - RetryBackOff *RetryBackOff - NumRetries uint32 -} - -func (r *RetryPolicy) normalize() { - if r == nil { - return - } - r.RetryBackOff.normalize() -} - -func (r *RetryPolicy) toEnvoy() *envoy_core_v3.RetryPolicy { - if r == nil { - return nil - } - - return &envoy_core_v3.RetryPolicy{ - RetryBackOff: r.RetryBackOff.toEnvoy(), - NumRetries: wrapperspb.UInt32(r.NumRetries), - } -} - -type RetryBackOff struct { - BaseInterval *time.Duration - MaxInterval *time.Duration -} - -func (v *RetryBackOff) normalize() { - if v == nil { - return - } - - if v.BaseInterval == nil { - v.BaseInterval = new(time.Duration) - *v.BaseInterval = time.Second - } - - if v.MaxInterval == nil { - v.MaxInterval = new(time.Duration) - *v.MaxInterval = time.Second * 30 - } -} - -func (r *RetryBackOff) toEnvoy() *envoy_core_v3.BackoffStrategy { - if r == nil { - return nil - } - - return &envoy_core_v3.BackoffStrategy{ - BaseInterval: durationpb.New(*r.BaseInterval), - MaxInterval: durationpb.New(*r.MaxInterval), - } -} - -func parseAddr(s string) (host string, port int, err error) { - // Strip the protocol if one was provided - if _, addr, hasProto := strings.Cut(s, "://"); hasProto { - s = addr - } - idx := strings.LastIndex(s, ":") - switch idx { - case -1, len(s) - 1: - err = fmt.Errorf("invalid input format %q: expected host:port", s) - case 0: - host = localhost - port, err = strconv.Atoi(s[idx+1:]) - default: - host = s[:idx] - port, err = strconv.Atoi(s[idx+1:]) - } - return -} diff --git a/agent/envoyextensions/builtin/wasm/structs.go b/agent/envoyextensions/builtin/wasm/structs.go index 67540fee56c94..012099ab62036 100644 --- a/agent/envoyextensions/builtin/wasm/structs.go +++ b/agent/envoyextensions/builtin/wasm/structs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wasm diff --git a/agent/envoyextensions/builtin/wasm/wasm.go b/agent/envoyextensions/builtin/wasm/wasm.go index da1e0a7ceaa95..c16ac4da81c0e 100644 --- a/agent/envoyextensions/builtin/wasm/wasm.go +++ b/agent/envoyextensions/builtin/wasm/wasm.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wasm diff --git a/agent/envoyextensions/builtin/wasm/wasm_test.go b/agent/envoyextensions/builtin/wasm/wasm_test.go index fd348d59a73d4..93f3a4b5e0aff 100644 --- a/agent/envoyextensions/builtin/wasm/wasm_test.go +++ b/agent/envoyextensions/builtin/wasm/wasm_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package wasm @@ -18,6 +18,7 @@ import ( envoy_network_wasm_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/wasm/v3" envoy_wasm_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/durationpb" @@ -137,8 +138,8 @@ func TestHttpWasmExtension(t *testing.T) { t.Logf("cfg =\n%s\n\n", cfg.toJSON(t)) require.Equal(t, len(expFilters), len(obsFilters)) for idx, expFilter := range expFilters { - t.Logf("expFilterJSON[%d] =\n%s\n\n", idx, prototest.ProtoToJSON(t, expFilter)) - t.Logf("obsfilterJSON[%d] =\n%s\n\n", idx, prototest.ProtoToJSON(t, obsFilters[idx])) + t.Logf("expFilterJSON[%d] =\n%s\n\n", idx, protoToJSON(t, expFilter)) + t.Logf("obsfilterJSON[%d] =\n%s\n\n", idx, protoToJSON(t, obsFilters[idx])) } } @@ -650,6 +651,16 @@ func newTestWasmConfig(protocol string, enterprise bool) *testWasmConfig { return cfg } +func protoToJSON(t *testing.T, pb proto.Message) string { + t.Helper() + m := protojson.MarshalOptions{ + Indent: " ", + } + gotJSON, err := m.Marshal(pb) + require.NoError(t, err) + return string(gotJSON) +} + func setField(m map[string]any, path string, value any) { upsertField(m, path, value, 0) } diff --git a/agent/envoyextensions/registered_extensions.go b/agent/envoyextensions/registered_extensions.go index b2bb2aeeaaa9e..7b0f2ae61da14 100644 --- a/agent/envoyextensions/registered_extensions.go +++ b/agent/envoyextensions/registered_extensions.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package envoyextensions @@ -12,7 +12,6 @@ import ( awslambda "github.com/hashicorp/consul/agent/envoyextensions/builtin/aws-lambda" extauthz "github.com/hashicorp/consul/agent/envoyextensions/builtin/ext-authz" "github.com/hashicorp/consul/agent/envoyextensions/builtin/lua" - otelaccesslogging "github.com/hashicorp/consul/agent/envoyextensions/builtin/otel-access-logging" propertyoverride "github.com/hashicorp/consul/agent/envoyextensions/builtin/property-override" "github.com/hashicorp/consul/agent/envoyextensions/builtin/wasm" "github.com/hashicorp/consul/api" @@ -22,25 +21,22 @@ import ( type extensionConstructor func(api.EnvoyExtension) (extensioncommon.EnvoyExtender, error) var extensionConstructors = map[string]extensionConstructor{ - api.BuiltinOTELAccessLoggingExtension: otelaccesslogging.Constructor, - api.BuiltinLuaExtension: lua.Constructor, - api.BuiltinAWSLambdaExtension: awslambda.Constructor, - api.BuiltinPropertyOverrideExtension: propertyoverride.Constructor, - api.BuiltinWasmExtension: wasm.Constructor, - api.BuiltinExtAuthzExtension: extauthz.Constructor, + api.BuiltinLuaExtension: lua.Constructor, + api.BuiltinAWSLambdaExtension: awslambda.Constructor, + api.BuiltinPropertyOverrideExtension: propertyoverride.Constructor, + api.BuiltinWasmExtension: wasm.Constructor, + api.BuiltinExtAuthzExtension: extauthz.Constructor, } // ConstructExtension attempts to lookup and build an extension from the registry with the // given config. Returns an error if the extension does not exist, or if the extension fails // to be constructed properly. func ConstructExtension(ext api.EnvoyExtension) (extensioncommon.EnvoyExtender, error) { - if constructor, ok := extensionConstructors[ext.Name]; ok { - return constructor(ext) + constructor, ok := extensionConstructors[ext.Name] + if !ok { + return nil, fmt.Errorf("name %q is not a built-in extension", ext.Name) } - if constructor, ok := enterpriseExtensionConstructors[ext.Name]; ok { - return constructor(ext) - } - return nil, fmt.Errorf("name %q is not a built-in extension", ext.Name) + return constructor(ext) } // ValidateExtensions will attempt to construct each instance of the given envoy extension configurations diff --git a/agent/envoyextensions/registered_extensions_ce.go b/agent/envoyextensions/registered_extensions_ce.go deleted file mode 100644 index 4b9e07e50ba46..0000000000000 --- a/agent/envoyextensions/registered_extensions_ce.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package envoyextensions - -var enterpriseExtensionConstructors = map[string]extensionConstructor{} diff --git a/agent/envoyextensions/registered_extensions_test.go b/agent/envoyextensions/registered_extensions_test.go index 818db87fafea8..7f3cb6bbac7dc 100644 --- a/agent/envoyextensions/registered_extensions_test.go +++ b/agent/envoyextensions/registered_extensions_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package envoyextensions diff --git a/agent/event_endpoint.go b/agent/event_endpoint.go index da589632c75af..034dec305619b 100644 --- a/agent/event_endpoint.go +++ b/agent/event_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/event_endpoint_test.go b/agent/event_endpoint_test.go index f28b913cfd009..4be21a6914b91 100644 --- a/agent/event_endpoint_test.go +++ b/agent/event_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -234,7 +234,7 @@ func TestEventList_ACLFilter(t *testing.T) { t.Run("token with access to one event type", func(t *testing.T) { retry.Run(t, func(r *retry.R) { - token := testCreateToken(r, a, ` + token := testCreateToken(t, a, ` event "foo" { policy = "read" } diff --git a/agent/exec/exec.go b/agent/exec/exec.go index 408dc6bb8110d..d4b4bfafd1fed 100644 --- a/agent/exec/exec.go +++ b/agent/exec/exec.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package exec diff --git a/agent/exec/exec_unix.go b/agent/exec/exec_unix.go index e0ec556771c0b..32ff23249e3b0 100644 --- a/agent/exec/exec_unix.go +++ b/agent/exec/exec_unix.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !windows +// +build !windows package exec diff --git a/agent/exec/exec_windows.go b/agent/exec/exec_windows.go index d85ea09d785e6..1a0cb4c82c806 100644 --- a/agent/exec/exec_windows.go +++ b/agent/exec/exec_windows.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build windows +// +build windows package exec diff --git a/agent/federation_state_endpoint.go b/agent/federation_state_endpoint.go index 40a3df1ff1cd9..0bec145ae60cd 100644 --- a/agent/federation_state_endpoint.go +++ b/agent/federation_state_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/grpc-external/forward.go b/agent/grpc-external/forward.go index 395fb6aa479d0..c0ed064ac808c 100644 --- a/agent/grpc-external/forward.go +++ b/agent/grpc-external/forward.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external diff --git a/agent/grpc-external/limiter/limiter.go b/agent/grpc-external/limiter/limiter.go index 44aaac616f99f..f995f963049be 100644 --- a/agent/grpc-external/limiter/limiter.go +++ b/agent/grpc-external/limiter/limiter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // package limiter provides primatives for limiting the number of concurrent // operations in-flight. diff --git a/agent/grpc-external/limiter/limiter_test.go b/agent/grpc-external/limiter/limiter_test.go index 3cfa3ad263273..fa165a66706bb 100644 --- a/agent/grpc-external/limiter/limiter_test.go +++ b/agent/grpc-external/limiter/limiter_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package limiter diff --git a/agent/grpc-external/options.go b/agent/grpc-external/options.go index 04e5c10efb513..a25a4482990f6 100644 --- a/agent/grpc-external/options.go +++ b/agent/grpc-external/options.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external diff --git a/agent/grpc-external/options_test.go b/agent/grpc-external/options_test.go index ccc0ad12fd697..b2edb8fbf1f50 100644 --- a/agent/grpc-external/options_test.go +++ b/agent/grpc-external/options_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external diff --git a/agent/grpc-external/server.go b/agent/grpc-external/server.go index 8e3928eb7d824..fedffde8f4dc8 100644 --- a/agent/grpc-external/server.go +++ b/agent/grpc-external/server.go @@ -1,33 +1,23 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external import ( - "context" - "fmt" - "strings" "time" "github.com/armon/go-metrics" middleware "github.com/grpc-ecosystem/go-grpc-middleware" recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - "github.com/hashi-derek/grpc-proxy/proxy" - "github.com/hashicorp/go-hclog" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" "github.com/hashicorp/consul/agent/consul/rate" agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" "github.com/hashicorp/consul/tlsutil" ) -const FORWARD_SERVICE_NAME_PREFIX = "/hashicorp.consul." - var ( metricsLabels = []metrics.Label{{ Name: "server_type", @@ -38,12 +28,11 @@ var ( // NewServer constructs a gRPC server for the external gRPC port, to which // handlers can be registered. func NewServer( - logger hclog.Logger, + logger agentmiddleware.Logger, metricsObj *metrics.Metrics, tls *tlsutil.Configurator, limiter rate.RequestLimitsHandler, keepaliveParams keepalive.ServerParameters, - serverConn *grpc.ClientConn, ) *grpc.Server { if metricsObj == nil { metricsObj = metrics.Default() @@ -82,11 +71,6 @@ func NewServer( }), } - // forward FORWARD_SERVICE_NAME_PREFIX services from client agent to server agent - if serverConn != nil { - opts = append(opts, grpc.UnknownServiceHandler(proxy.TransparentHandler(makeDirector(serverConn, logger)))) - } - if tls != nil { // Attach TLS credentials, if provided. tlsCreds := agentmiddleware.NewOptionalTransportCredentials( @@ -96,24 +80,3 @@ func NewServer( } return grpc.NewServer(opts...) 
} - -func makeDirector(serverConn *grpc.ClientConn, logger hclog.Logger) func(ctx context.Context, fullMethodName string) (context.Context, *grpc.ClientConn, error) { - return func(ctx context.Context, fullMethodName string) (context.Context, *grpc.ClientConn, error) { - var mdCopy metadata.MD - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - mdCopy = metadata.MD{} - } else { - mdCopy = md.Copy() - } - outCtx := metadata.NewOutgoingContext(ctx, mdCopy) - - logger.Debug("forwarding the request to the consul server", "method", fullMethodName) - // throw unimplemented error if the method is not meant to be forwarded - if !strings.HasPrefix(fullMethodName, FORWARD_SERVICE_NAME_PREFIX) { - return outCtx, nil, status.Errorf(codes.Unimplemented, fmt.Sprintf("Unknown method %s", fullMethodName)) - } - - return outCtx, serverConn, nil - } -} diff --git a/agent/grpc-external/services/acl/login.go b/agent/grpc-external/services/acl/login.go index 1e44acf8a1712..c8c399d108be3 100644 --- a/agent/grpc-external/services/acl/login.go +++ b/agent/grpc-external/services/acl/login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/grpc-external/services/acl/login_test.go b/agent/grpc-external/services/acl/login_test.go index 3b956d7c8c71e..e858618a906b4 100644 --- a/agent/grpc-external/services/acl/login_test.go +++ b/agent/grpc-external/services/acl/login_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/grpc-external/services/acl/logout.go b/agent/grpc-external/services/acl/logout.go index 691ac7b888949..bd3bb5e3e42a2 100644 --- a/agent/grpc-external/services/acl/logout.go +++ b/agent/grpc-external/services/acl/logout.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/grpc-external/services/acl/logout_test.go b/agent/grpc-external/services/acl/logout_test.go index df5c39628297c..69491db5e3b0a 100644 --- a/agent/grpc-external/services/acl/logout_test.go +++ b/agent/grpc-external/services/acl/logout_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/grpc-external/services/acl/server.go b/agent/grpc-external/services/acl/server.go index cc7e35d1e622f..5513950e02ec8 100644 --- a/agent/grpc-external/services/acl/server.go +++ b/agent/grpc-external/services/acl/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl @@ -53,8 +53,8 @@ func NewServer(cfg Config) *Server { return &Server{cfg} } -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbacl.RegisterACLServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbacl.RegisterACLServiceServer(grpcServer, s) } func (s *Server) requireACLsEnabled(logger hclog.Logger) error { diff --git a/agent/grpc-external/services/acl/server_test.go b/agent/grpc-external/services/acl/server_test.go index 1b6cd066001ef..89c49bf226a13 100644 --- a/agent/grpc-external/services/acl/server_test.go +++ b/agent/grpc-external/services/acl/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package acl diff --git a/agent/grpc-external/services/configentry/server.go b/agent/grpc-external/services/configentry/server.go deleted file mode 100644 index 15b9895159125..0000000000000 --- a/agent/grpc-external/services/configentry/server.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package configentry - -import ( - "context" - "fmt" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-memdb" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - grpcstatus "google.golang.org/grpc/status" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/blockingquery" - "github.com/hashicorp/consul/agent/consul/state" - external "github.com/hashicorp/consul/agent/grpc-external" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbconfigentry" -) - -// Server implements pbconfigentry.ConfigEntryService to provide RPC operations related to -// configentries -type Server struct { - Config -} - -type Config struct { - Backend Backend - Logger hclog.Logger - ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) - FSMServer blockingquery.FSMServer -} - -type Backend interface { - EnterpriseCheckPartitions(partition string) error - - ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) -} - -func NewServer(cfg Config) *Server { - external.RequireNotNil(cfg.Backend, "Backend") - external.RequireNotNil(cfg.Logger, "Logger") - external.RequireNotNil(cfg.FSMServer, "FSMServer") - - return &Server{ - Config: cfg, - } -} - -var _ pbconfigentry.ConfigEntryServiceServer = (*Server)(nil) - -type readRequest struct { - structs.QueryOptions - structs.DCSpecificRequest -} - -func (s *Server) Register(grpcServer grpc.ServiceRegistrar) { - pbconfigentry.RegisterConfigEntryServiceServer(grpcServer, s) -} - -func (s *Server) GetResolvedExportedServices( - ctx context.Context, - req *pbconfigentry.GetResolvedExportedServicesRequest, -) (*pbconfigentry.GetResolvedExportedServicesResponse, error) { - - if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { - return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) - } - - options, err := external.QueryOptionsFromContext(ctx) - if err != nil { - return nil, err - } - - var resp *pbconfigentry.GetResolvedExportedServicesResponse - var emptyDCSpecificRequest structs.DCSpecificRequest - - handled, err := s.ForwardRPC(&readRequest{options, emptyDCSpecificRequest}, func(conn *grpc.ClientConn) error { - var err error - resp, err = pbconfigentry.NewConfigEntryServiceClient(conn).GetResolvedExportedServices(ctx, req) - return err - }) - if handled || err != nil { - return resp, err - } - - defer metrics.MeasureSince([]string{"configentry", "get_resolved_exported_services"}, time.Now()) - - var authzCtx acl.AuthorizerContext - entMeta := structs.DefaultEnterpriseMetaInPartition(req.Partition) - - authz, err := s.Backend.ResolveTokenAndDefaultMeta(options.Token, entMeta, &authzCtx) - if err != nil { - return nil, err - } - - if err := authz.ToAllowAuthorizer().MeshReadAllowed(&authzCtx); err != nil { - return nil, err - } - - res := &pbconfigentry.GetResolvedExportedServicesResponse{} - meta := structs.QueryMeta{} - err = blockingquery.Query(s.FSMServer, 
&options, &meta, func(ws memdb.WatchSet, store *state.Store) error { - idx, exportedSvcs, err := store.ResolvedExportedServices(ws, entMeta) - if err != nil { - return err - } - - meta.SetIndex(idx) - - res.Services = exportedSvcs - return nil - }) - if err != nil { - return nil, fmt.Errorf("error executing exported services blocking query: %w", err) - } - - header, err := external.GRPCMetadataFromQueryMeta(meta) - if err != nil { - return nil, fmt.Errorf("could not convert query metadata to gRPC header") - } - if err := grpc.SendHeader(ctx, header); err != nil { - return nil, fmt.Errorf("could not send gRPC header") - } - - return res, nil -} diff --git a/agent/grpc-external/services/configentry/server_ce_test.go b/agent/grpc-external/services/configentry/server_ce_test.go deleted file mode 100644 index 953cbde464e10..0000000000000 --- a/agent/grpc-external/services/configentry/server_ce_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package configentry - -import ( - "context" - "testing" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/grpc-external/testutils" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -func TestGetResolvedExportedServices(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(nil) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - server := NewServer(c) - - // Add config entry - entry := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - { - Peer: "west", - }, - }, - }, - { - Name: "cache", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - }, - }, - }, - } - fakeFSM.GetState().EnsureConfigEntry(1, entry) - - expected := []*pbconfigentry.ResolvedExportedService{ - { - Service: "cache", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"east"}, - }, - }, - { - Service: "db", - Consumers: &pbconfigentry.Consumers{ - Peers: []string{"east", "west"}, - }, - }, - } - - ctx := grpc.NewContextWithServerTransportStream(context.Background(), &testutils.MockServerTransportStream{}) - resp, err := server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.NoError(t, err) - require.Equal(t, expected, resp.Services) -} diff --git a/agent/grpc-external/services/configentry/server_test.go b/agent/grpc-external/services/configentry/server_test.go deleted file mode 100644 index f89d15233e1a6..0000000000000 --- a/agent/grpc-external/services/configentry/server_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package configentry - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/grpc-external/testutils" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/private/pbconfigentry" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -type MockBackend struct { - mock.Mock - authorizer acl.Authorizer -} - -func (m *MockBackend) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (resolver.Result, error) { - return resolver.Result{Authorizer: m.authorizer}, nil -} - -func (m *MockBackend) EnterpriseCheckPartitions(partition string) error { - called := m.Called(partition) - ret := called.Get(0) - - if ret == nil { - return nil - } else { - return ret.(error) - } -} - -func TestGetResolvedExportedServices_ACL_Deny(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Deny) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(nil) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - - server := NewServer(c) - - _, err := server.GetResolvedExportedServices(context.Background(), &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.Error(t, err) -} - -func TestGetResolvedExportedServices_AC_Allow(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(nil) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - server := NewServer(c) - - ctx := grpc.NewContextWithServerTransportStream(context.Background(), &testutils.MockServerTransportStream{}) - _, err := server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.NoError(t, err) -} - -func TestGetResolvedExportedServices_PartitionCheck(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(fmt.Errorf("partition not supported")) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - - server := NewServer(c) - - ctx := grpc.NewContextWithServerTransportStream(context.Background(), &testutils.MockServerTransportStream{}) - - resp, err := server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.EqualError(t, err, "rpc error: code = InvalidArgument desc = partition not supported") - require.Nil(t, resp) -} - -func TestGetResolvedExportedServices_Index(t *testing.T) { - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(nil) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := 
Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - server := NewServer(c) - - // Add config entry - entry := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - { - Peer: "west", - }, - }, - }, - { - Name: "cache", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - }, - }, - }, - } - fakeFSM.GetState().EnsureConfigEntry(1, entry) - - headerStream := &testutils.MockServerTransportStream{} - - ctx := grpc.NewContextWithServerTransportStream(context.Background(), headerStream) - resp, err := server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.NoError(t, err) - require.Equal(t, 2, len(resp.Services)) - require.Equal(t, []string{"1"}, headerStream.MD.Get("index")) - - // Updating the index - fakeFSM.GetState().EnsureConfigEntry(2, entry) - - headerStream = &testutils.MockServerTransportStream{} - - ctx = grpc.NewContextWithServerTransportStream(context.Background(), headerStream) - resp, err = server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.NoError(t, err) - require.Equal(t, 2, len(resp.Services)) - require.Equal(t, []string{"2"}, headerStream.MD.Get("index")) -} - -func TestGetResolvedExportedServices_Metrics(t *testing.T) { - sink := metrics.NewInmemSink(5*time.Second, time.Minute) - cfg := metrics.DefaultConfig("consul") - metrics.NewGlobal(cfg, sink) - - authorizer := acl.MockAuthorizer{} - authorizer.On("MeshRead", mock.Anything).Return(acl.Allow) - - backend := &MockBackend{authorizer: &authorizer} - backend.On("EnterpriseCheckPartitions", mock.Anything).Return(nil) - - fakeFSM := testutils.NewFakeBlockingFSM(t) - - c := Config{ - Backend: backend, - Logger: hclog.New(nil), - ForwardRPC: doForwardRPC, - FSMServer: fakeFSM, - } - server := NewServer(c) - - // Add config entry - entry := &structs.ExportedServicesConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: "db", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - { - Peer: "west", - }, - }, - }, - { - Name: "cache", - Consumers: []structs.ServiceConsumer{ - { - Peer: "east", - }, - }, - }, - }, - } - fakeFSM.GetState().EnsureConfigEntry(1, entry) - - ctx := grpc.NewContextWithServerTransportStream(context.Background(), &testutils.MockServerTransportStream{}) - resp, err := server.GetResolvedExportedServices(ctx, &pbconfigentry.GetResolvedExportedServicesRequest{}) - require.NoError(t, err) - require.Equal(t, 2, len(resp.Services)) - - // Checking if metrics were added - require.NotNil(t, sink.Data()[0].Samples[`consul.configentry.get_resolved_exported_services`]) -} - -func doForwardRPC(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) { - return false, nil -} diff --git a/agent/grpc-external/services/connectca/server.go b/agent/grpc-external/services/connectca/server.go index fbdabc1bd8990..c90962e180c84 100644 --- a/agent/grpc-external/services/connectca/server.go +++ b/agent/grpc-external/services/connectca/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca @@ -57,8 +57,8 @@ func NewServer(cfg Config) *Server { return &Server{cfg} } -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbconnectca.RegisterConnectCAServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbconnectca.RegisterConnectCAServiceServer(grpcServer, s) } func (s *Server) requireConnect() error { diff --git a/agent/grpc-external/services/connectca/server_test.go b/agent/grpc-external/services/connectca/server_test.go index 27c8d17c0c9dc..84636e9e75886 100644 --- a/agent/grpc-external/services/connectca/server_test.go +++ b/agent/grpc-external/services/connectca/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca diff --git a/agent/grpc-external/services/connectca/sign.go b/agent/grpc-external/services/connectca/sign.go index 148bf675b0570..59c1a6f28354a 100644 --- a/agent/grpc-external/services/connectca/sign.go +++ b/agent/grpc-external/services/connectca/sign.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca diff --git a/agent/grpc-external/services/connectca/sign_test.go b/agent/grpc-external/services/connectca/sign_test.go index 07be304081ebb..e43978e0b906d 100644 --- a/agent/grpc-external/services/connectca/sign_test.go +++ b/agent/grpc-external/services/connectca/sign_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca diff --git a/agent/grpc-external/services/connectca/watch_roots.go b/agent/grpc-external/services/connectca/watch_roots.go index ddd02ca56e0f9..14927e2188a18 100644 --- a/agent/grpc-external/services/connectca/watch_roots.go +++ b/agent/grpc-external/services/connectca/watch_roots.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca diff --git a/agent/grpc-external/services/connectca/watch_roots_test.go b/agent/grpc-external/services/connectca/watch_roots_test.go index 171e00324643c..bfdb76f33bdd8 100644 --- a/agent/grpc-external/services/connectca/watch_roots_test.go +++ b/agent/grpc-external/services/connectca/watch_roots_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package connectca diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index ea4852efab2be..13bbd1c9f94b9 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane @@ -8,17 +8,11 @@ import ( "errors" "strings" - "github.com/hashicorp/go-hclog" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/structpb" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/consul/state" @@ -29,11 +23,7 @@ import ( ) func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) { - proxyID := req.ProxyId - if req.GetServiceId() != "" { - proxyID = req.GetServiceId() - } - logger := s.Logger.Named("get-envoy-bootstrap-params").With("proxy_id", proxyID, "request_id", external.TraceID()) + logger := s.Logger.Named("get-envoy-bootstrap-params").With("service_id", req.GetServiceId(), "request_id", external.TraceID()) logger.Trace("Started processing request") defer logger.Trace("Finished processing request") @@ -50,75 +40,9 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G return nil, status.Error(codes.Unauthenticated, err.Error()) } - if s.EnableV2 { - // Get the workload. - workloadId := &pbresource.ID{ - Name: proxyID, - Tenancy: &pbresource.Tenancy{ - Namespace: req.Namespace, - Partition: req.Partition, - }, - Type: pbcatalog.WorkloadType, - } - workloadRsp, err := s.ResourceAPIClient.Read(ctx, &pbresource.ReadRequest{ - Id: workloadId, - }) - if err != nil { - // This error should already include the gRPC status code and so we don't need to wrap it - // in status.Error. - logger.Error("Error looking up workload", "error", err) - return nil, err - } - var workload pbcatalog.Workload - err = workloadRsp.Resource.Data.UnmarshalTo(&workload) - if err != nil { - return nil, status.Error(codes.Internal, "failed to parse workload data") - } - - // Only workloads that have an associated identity can ask for proxy bootstrap parameters. - if workload.Identity == "" { - return nil, status.Errorf(codes.InvalidArgument, "workload %q doesn't have identity associated with it", req.ProxyId) - } - - // verify identity:write is allowed. if not, give permission denied error. 
- if err := authz.ToAllowAuthorizer().IdentityWriteAllowed(workload.Identity, &authzContext); err != nil { - return nil, err - } - - computedProxyConfig, err := resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration]( - ctx, - s.ResourceAPIClient, - resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, workloadId)) - - if err != nil { - logger.Error("Error looking up ComputedProxyConfiguration for this workload", "error", err) - return nil, err - } - - rsp := &pbdataplane.GetEnvoyBootstrapParamsResponse{ - Identity: workload.Identity, - Partition: workloadRsp.Resource.Id.Tenancy.Partition, - Namespace: workloadRsp.Resource.Id.Tenancy.Namespace, - Datacenter: s.Datacenter, - NodeName: workload.NodeName, - } - - if computedProxyConfig != nil { - if computedProxyConfig.GetData().GetDynamicConfig() != nil { - rsp.AccessLogs = makeAccessLogs(computedProxyConfig.GetData().GetDynamicConfig().GetAccessLogs(), logger) - } - - rsp.BootstrapConfig = computedProxyConfig.GetData().GetBootstrapConfig() - } - - return rsp, nil - } - - // The remainder of this file focuses on v1 implementation of this endpoint. - store := s.GetStore() - _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), proxyID, &entMeta, structs.DefaultPeerKeyword) + _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), req.GetServiceId(), &entMeta, structs.DefaultPeerKeyword) if err != nil { logger.Error("Error looking up service", "error", err) if errors.Is(err, state.ErrNodeNotFound) { @@ -157,34 +81,8 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G // Inspect access logging // This is non-essential, and don't want to return an error unless there is a more serious issue var accessLogs []string - if ns != nil { - accessLogs = makeAccessLogs(&ns.Proxy.AccessLogs, logger) - } - - // Build out the response - var serviceName string - if svc.ServiceKind == structs.ServiceKindConnectProxy { - serviceName = svc.ServiceProxy.DestinationServiceName - } else { - serviceName = svc.ServiceName - } - - return &pbdataplane.GetEnvoyBootstrapParamsResponse{ - Identity: serviceName, - Service: serviceName, - Partition: svc.EnterpriseMeta.PartitionOrDefault(), - Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), - Config: bootstrapConfig, - Datacenter: s.Datacenter, - NodeName: svc.Node, - AccessLogs: accessLogs, - }, nil -} - -func makeAccessLogs(logs structs.AccessLogs, logger hclog.Logger) []string { - var accessLogs []string - if logs.GetEnabled() { - envoyLoggers, err := accesslogs.MakeAccessLogs(logs, false) + if ns != nil && ns.Proxy.AccessLogs.Enabled { + envoyLoggers, err := accesslogs.MakeAccessLogs(&ns.Proxy.AccessLogs, false) if err != nil { logger.Warn("Error creating the envoy access log config", "error", err) } @@ -200,5 +98,41 @@ func makeAccessLogs(logs structs.AccessLogs, logger hclog.Logger) []string { } } - return accessLogs + // Build out the response + var serviceName string + if svc.ServiceKind == structs.ServiceKindConnectProxy { + serviceName = svc.ServiceProxy.DestinationServiceName + } else { + serviceName = svc.ServiceName + } + + return &pbdataplane.GetEnvoyBootstrapParamsResponse{ + Service: serviceName, + Partition: svc.EnterpriseMeta.PartitionOrDefault(), + Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), + Config: bootstrapConfig, + Datacenter: s.Datacenter, + ServiceKind: convertToResponseServiceKind(svc.ServiceKind), + NodeName: svc.Node, + NodeId: string(svc.ID), + AccessLogs: accessLogs, + }, nil +} + +func 
convertToResponseServiceKind(serviceKind structs.ServiceKind) (respKind pbdataplane.ServiceKind) { + switch serviceKind { + case structs.ServiceKindConnectProxy: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_CONNECT_PROXY + case structs.ServiceKindMeshGateway: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_MESH_GATEWAY + case structs.ServiceKindTerminatingGateway: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_TERMINATING_GATEWAY + case structs.ServiceKindIngressGateway: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_INGRESS_GATEWAY + case structs.ServiceKindAPIGateway: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_API_GATEWAY + case structs.ServiceKindTypical: + respKind = pbdataplane.ServiceKind_SERVICE_KIND_TYPICAL + } + return } diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go index 2a50094029076..322d2f6527459 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane @@ -7,33 +7,23 @@ import ( "context" "testing" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/structpb" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" external "github.com/hashicorp/consul/agent/grpc-external" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/mesh" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/resource/resourcetest" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbdataplane" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/types" ) const ( - testIdentity = "test-identity" testToken = "acl-token-get-envoy-bootstrap-params" testServiceName = "web" proxyServiceID = "web-proxy" @@ -51,15 +41,13 @@ const ( proxyDefaultsRequestTimeout = 1111 serviceDefaultsProtocol = "tcp" serviceDefaultsConnectTimeout = 4444 - - testAccessLogs = "{\"name\":\"Consul Listener Filter Log\",\"typedConfig\":{\"@type\":\"type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog\",\"logFormat\":{\"jsonFormat\":{\"custom_field\":\"%START_TIME%\"}}}}" ) func testRegisterRequestProxy(t *testing.T) *structs.RegisterRequest { return &structs.RegisterRequest{ Datacenter: serverDC, Node: nodeName, - ID: nodeID, + ID: types.NodeID(nodeID), Address: "127.0.0.1", Service: &structs.NodeService{ Kind: structs.ServiceKindConnectProxy, @@ -80,7 +68,7 @@ func testRegisterRequestProxy(t *testing.T) *structs.RegisterRequest { func testRegisterIngressGateway(t *testing.T) *structs.RegisterRequest { registerReq := structs.TestRegisterIngressGateway(t) - registerReq.ID = "2980b72b-bd9d-9d7b-d4f9-951bf7508d95" + registerReq.ID = 
types.NodeID("2980b72b-bd9d-9d7b-d4f9-951bf7508d95") registerReq.Service.ID = registerReq.Service.Service registerReq.Service.Proxy.Config = map[string]interface{}{ proxyConfigKey: proxyConfigValue, @@ -179,7 +167,9 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition) require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace) requireConfigField(t, resp, proxyConfigKey, structpb.NewStringValue(proxyConfigValue)) + require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind) require.Equal(t, tc.registerReq.Node, resp.NodeName) + require.Equal(t, string(tc.registerReq.ID), resp.NodeId) if tc.serviceDefaults != nil && tc.proxyDefaults != nil { // service-defaults take precedence over proxy-defaults @@ -252,156 +242,6 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { } } -func TestGetEnvoyBootstrapParams_Success_EnableV2(t *testing.T) { - type testCase struct { - name string - workloadData *pbcatalog.Workload - proxyCfg *pbmesh.ComputedProxyConfiguration - expBootstrapCfg *pbmesh.BootstrapConfig - expAccessLogs string - } - - run := func(t *testing.T, tc testCase) { - resourceClient := svctest.NewResourceServiceBuilder(). - WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes). - Run(t) - - options := structs.QueryOptions{Token: testToken} - ctx, err := external.ContextWithQueryOptions(context.Background(), options) - require.NoError(t, err) - - aclResolver := &MockACLResolver{} - - server := NewServer(Config{ - Logger: hclog.NewNullLogger(), - ACLResolver: aclResolver, - Datacenter: serverDC, - EnableV2: true, - ResourceAPIClient: resourceClient, - }) - client := testClient(t, server) - - // Add required fields to workload data. - tc.workloadData.Addresses = []*pbcatalog.WorkloadAddress{ - { - Host: "127.0.0.1", - }, - } - tc.workloadData.Ports = map[string]*pbcatalog.WorkloadPort{ - "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - } - workloadResource := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload"). - WithData(t, tc.workloadData). - WithTenancy(resource.DefaultNamespacedTenancy()). - Write(t, resourceClient) - - // Create computed proxy cfg resource. - resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, workloadResource.Id.Name). - WithData(t, tc.proxyCfg). - WithTenancy(resource.DefaultNamespacedTenancy()). - Write(t, resourceClient) - - req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ - ProxyId: workloadResource.Id.Name, - Namespace: workloadResource.Id.Tenancy.Namespace, - Partition: workloadResource.Id.Tenancy.Partition, - } - - aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). 
- Return(testutils.ACLUseProvidedPolicy(t, - &acl.Policy{ - PolicyRules: acl.PolicyRules{ - Services: []*acl.ServiceRule{ - { - Name: workloadResource.Id.Name, - Policy: acl.PolicyRead, - }, - }, - Identities: []*acl.IdentityRule{ - { - Name: testIdentity, - Policy: acl.PolicyWrite, - }, - }, - }, - }), nil) - - resp, err := client.GetEnvoyBootstrapParams(ctx, req) - require.NoError(t, err) - - require.Equal(t, tc.workloadData.Identity, resp.Identity) - require.Equal(t, serverDC, resp.Datacenter) - require.Equal(t, workloadResource.Id.Tenancy.Partition, resp.Partition) - require.Equal(t, workloadResource.Id.Tenancy.Namespace, resp.Namespace) - require.Equal(t, resp.NodeName, tc.workloadData.NodeName) - prototest.AssertDeepEqual(t, tc.expBootstrapCfg, resp.BootstrapConfig) - if tc.expAccessLogs != "" { - require.JSONEq(t, tc.expAccessLogs, resp.AccessLogs[0]) - } - } - - testCases := []testCase{ - { - name: "workload without node", - workloadData: &pbcatalog.Workload{ - Identity: testIdentity, - }, - expBootstrapCfg: nil, - }, - { - name: "workload with node", - workloadData: &pbcatalog.Workload{ - Identity: testIdentity, - NodeName: "test-node", - }, - expBootstrapCfg: nil, - }, - { - name: "single proxy configuration", - workloadData: &pbcatalog.Workload{ - Identity: testIdentity, - }, - proxyCfg: &pbmesh.ComputedProxyConfiguration{ - BootstrapConfig: &pbmesh.BootstrapConfig{ - DogstatsdUrl: "dogstats-url", - }, - }, - expBootstrapCfg: &pbmesh.BootstrapConfig{ - DogstatsdUrl: "dogstats-url", - }, - }, - { - name: "multiple proxy configurations", - workloadData: &pbcatalog.Workload{ - Identity: testIdentity, - }, - proxyCfg: &pbmesh.ComputedProxyConfiguration{ - BootstrapConfig: &pbmesh.BootstrapConfig{ - DogstatsdUrl: "dogstats-url", - StatsdUrl: "stats-url", - }, - DynamicConfig: &pbmesh.DynamicConfig{ - AccessLogs: &pbmesh.AccessLogsConfig{ - Enabled: true, - JsonFormat: "{ \"custom_field\": \"%START_TIME%\" }", - }, - }, - }, - expBootstrapCfg: &pbmesh.BootstrapConfig{ - DogstatsdUrl: "dogstats-url", - StatsdUrl: "stats-url", - }, - expAccessLogs: testAccessLogs, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - run(t, tc) - }) - } -} - func TestGetEnvoyBootstrapParams_Error(t *testing.T) { type testCase struct { name string @@ -483,100 +323,6 @@ func TestGetEnvoyBootstrapParams_Error(t *testing.T) { } -func TestGetEnvoyBootstrapParams_Error_EnableV2(t *testing.T) { - type testCase struct { - name string - expectedErrCode codes.Code - expecteErrMsg string - workload *pbresource.Resource - } - - run := func(t *testing.T, tc testCase) { - resourceClient := svctest.NewResourceServiceBuilder(). - WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes). - Run(t) - - options := structs.QueryOptions{Token: testToken} - ctx, err := external.ContextWithQueryOptions(context.Background(), options) - require.NoError(t, err) - - aclResolver := &MockACLResolver{} - aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). - Return(testutils.ACLServiceRead(t, "doesn't matter"), nil) - - server := NewServer(Config{ - Logger: hclog.NewNullLogger(), - ACLResolver: aclResolver, - Datacenter: serverDC, - EnableV2: true, - ResourceAPIClient: resourceClient, - }) - client := testClient(t, server) - - var req pbdataplane.GetEnvoyBootstrapParamsRequest - // Write the workload resource. 
- if tc.workload != nil { - _, err = resourceClient.Write(context.Background(), &pbresource.WriteRequest{ - Resource: tc.workload, - }) - require.NoError(t, err) - - req = pbdataplane.GetEnvoyBootstrapParamsRequest{ - ProxyId: tc.workload.Id.Name, - Namespace: tc.workload.Id.Tenancy.Namespace, - Partition: tc.workload.Id.Tenancy.Partition, - } - } else { - req = pbdataplane.GetEnvoyBootstrapParamsRequest{ - ProxyId: "not-found", - Namespace: "default", - Partition: "default", - } - } - - resp, err := client.GetEnvoyBootstrapParams(ctx, &req) - require.Nil(t, resp) - require.Error(t, err) - errStatus, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, tc.expectedErrCode.String(), errStatus.Code().String()) - require.Equal(t, tc.expecteErrMsg, errStatus.Message()) - } - - workload := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload"). - WithData(t, &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1"}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "tcp": {Port: 8080}, - }, - }). - WithTenancy(resource.DefaultNamespacedTenancy()). - Build() - - testCases := []testCase{ - { - name: "workload doesn't exist", - expectedErrCode: codes.NotFound, - expecteErrMsg: "resource not found", - }, - { - name: "workload without identity", - expectedErrCode: codes.InvalidArgument, - expecteErrMsg: "workload \"test-workload\" doesn't have identity associated with it", - workload: workload, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - run(t, tc) - }) - } - -} - func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) { // Mock the ACL resolver to return ErrNotFound. aclResolver := &MockACLResolver{} diff --git a/agent/grpc-external/services/dataplane/get_supported_features.go b/agent/grpc-external/services/dataplane/get_supported_features.go index 09ee1c7ed4c51..ea638715338a3 100644 --- a/agent/grpc-external/services/dataplane/get_supported_features.go +++ b/agent/grpc-external/services/dataplane/get_supported_features.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/consul/version" ) -func (s *Server) GetSupportedDataplaneFeatures(ctx context.Context, _ *pbdataplane.GetSupportedDataplaneFeaturesRequest) (*pbdataplane.GetSupportedDataplaneFeaturesResponse, error) { +func (s *Server) GetSupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.GetSupportedDataplaneFeaturesRequest) (*pbdataplane.GetSupportedDataplaneFeaturesResponse, error) { logger := s.Logger.Named("get-supported-dataplane-features").With("request_id", external.TraceID()) logger.Trace("Started processing request") diff --git a/agent/grpc-external/services/dataplane/get_supported_features_test.go b/agent/grpc-external/services/dataplane/get_supported_features_test.go index 4761ccb3cb38a..329b5df0f68f1 100644 --- a/agent/grpc-external/services/dataplane/get_supported_features_test.go +++ b/agent/grpc-external/services/dataplane/get_supported_features_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane diff --git a/agent/grpc-external/services/dataplane/server.go b/agent/grpc-external/services/dataplane/server.go index 3a1809cc048d5..8772893863840 100644 --- a/agent/grpc-external/services/dataplane/server.go +++ b/agent/grpc-external/services/dataplane/server.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane import ( - "github.com/hashicorp/consul/proto-public/pbresource" "google.golang.org/grpc" "github.com/hashicorp/go-hclog" @@ -27,10 +26,6 @@ type Config struct { ACLResolver ACLResolver // Datacenter of the Consul server this gRPC server is hosted on Datacenter string - - // EnableV2 indicates whether a feature flag for v2 APIs is provided. - EnableV2 bool - ResourceAPIClient pbresource.ResourceServiceClient } type StateStore interface { @@ -49,6 +44,6 @@ func NewServer(cfg Config) *Server { var _ pbdataplane.DataplaneServiceServer = (*Server)(nil) -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbdataplane.RegisterDataplaneServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbdataplane.RegisterDataplaneServiceServer(grpcServer, s) } diff --git a/agent/grpc-external/services/dataplane/server_test.go b/agent/grpc-external/services/dataplane/server_test.go index ec57396bcb791..15ac272871e85 100644 --- a/agent/grpc-external/services/dataplane/server_test.go +++ b/agent/grpc-external/services/dataplane/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dataplane diff --git a/agent/grpc-external/services/dns/server.go b/agent/grpc-external/services/dns/server.go index 3485bd2f13eab..a9733c40666be 100644 --- a/agent/grpc-external/services/dns/server.go +++ b/agent/grpc-external/services/dns/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dns @@ -37,8 +37,8 @@ func NewServer(cfg Config) *Server { return &Server{cfg} } -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbdns.RegisterDNSServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbdns.RegisterDNSServiceServer(grpcServer, s) } // BufferResponseWriter writes a DNS response to a byte buffer. diff --git a/agent/grpc-external/services/dns/server_test.go b/agent/grpc-external/services/dns/server_test.go index b95221fe94cc7..0144eccc0cd1f 100644 --- a/agent/grpc-external/services/dns/server_test.go +++ b/agent/grpc-external/services/dns/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package dns @@ -33,7 +33,7 @@ func helloServer(w dns.ResponseWriter, req *dns.Msg) { w.WriteMsg(m) } -func testClient(t *testing.T, server testutils.GRPCService) pbdns.DNSServiceClient { +func testClient(t *testing.T, server *Server) pbdns.DNSServiceClient { t.Helper() addr := testutils.RunTestServer(t, server) diff --git a/agent/grpc-external/services/dns/server_v2.go b/agent/grpc-external/services/dns/server_v2.go deleted file mode 100644 index 64cf22012ffa0..0000000000000 --- a/agent/grpc-external/services/dns/server_v2.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "context" - "fmt" - "net" - - "github.com/miekg/dns" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" - - "github.com/hashicorp/go-hclog" - - agentdns "github.com/hashicorp/consul/agent/dns" - "github.com/hashicorp/consul/proto-public/pbdns" -) - -type ConfigV2 struct { - DNSRouter agentdns.DNSRouter - Logger hclog.Logger - TokenFunc func() string -} - -var _ pbdns.DNSServiceServer = (*ServerV2)(nil) - -// ServerV2 is a gRPC server that implements pbdns.DNSServiceServer. -// It is compatible with the refactored V2 DNS server and suitable for -// passing additional metadata along the grpc connection to catalog queries. -type ServerV2 struct { - ConfigV2 -} - -func NewServerV2(cfg ConfigV2) *ServerV2 { - return &ServerV2{cfg} -} - -func (s *ServerV2) Register(registrar grpc.ServiceRegistrar) { - pbdns.RegisterDNSServiceServer(registrar, s) -} - -// Query is a gRPC endpoint that will serve dns requests. It will be consumed primarily by the -// consul dataplane to proxy dns requests to consul. -func (s *ServerV2) Query(ctx context.Context, req *pbdns.QueryRequest) (*pbdns.QueryResponse, error) { - pr, ok := peer.FromContext(ctx) - if !ok { - return nil, fmt.Errorf("error retrieving peer information from context") - } - - var remote net.Addr - // We do this so that we switch to udp/tcp when handling the request since it will be proxied - // through consul through gRPC, and we need to 'fake' the protocol so that the message is trimmed - // according to whether it is UDP or TCP. - switch req.GetProtocol() { - case pbdns.Protocol_PROTOCOL_TCP: - remote = pr.Addr - case pbdns.Protocol_PROTOCOL_UDP: - remoteAddr := pr.Addr.(*net.TCPAddr) - remote = &net.UDPAddr{IP: remoteAddr.IP, Port: remoteAddr.Port} - default: - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("error protocol type not set: %v", req.GetProtocol())) - } - - msg := &dns.Msg{} - err := msg.Unpack(req.Msg) - if err != nil { - s.Logger.Error("error unpacking message", "err", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("failure decoding dns request: %s", err.Error())) - } - - // TODO (v2-dns): parse token and other context metadata from the grpc request/metadata (NET-7885) - reqCtx := agentdns.Context{ - Token: s.TokenFunc(), - } - - resp := s.DNSRouter.HandleRequest(msg, reqCtx, remote) - data, err := resp.Pack() - if err != nil { - s.Logger.Error("error packing message", "err", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("failure encoding dns request: %s", err.Error())) - } - - queryResponse := &pbdns.QueryResponse{Msg: data} - return queryResponse, nil -} diff --git a/agent/grpc-external/services/dns/server_v2_test.go b/agent/grpc-external/services/dns/server_v2_test.go deleted file mode 100644 index 700102935306b..0000000000000 --- a/agent/grpc-external/services/dns/server_v2_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package dns - -import ( - "context" - "errors" - - "github.com/hashicorp/go-hclog" - "github.com/miekg/dns" - "github.com/stretchr/testify/mock" - - agentdns "github.com/hashicorp/consul/agent/dns" - "github.com/hashicorp/consul/proto-public/pbdns" -) - -func basicResponse() *dns.Msg { - return &dns.Msg{ - MsgHdr: dns.MsgHdr{ - Opcode: dns.OpcodeQuery, - Response: true, - Authoritative: true, - }, - Compress: true, - Question: []dns.Question{ - { - Name: "abc.com.", - Qtype: dns.TypeANY, - Qclass: dns.ClassINET, - }, - }, - Extra: []dns.RR{ - &dns.TXT{ - Hdr: dns.RR_Header{ - Name: "abc.com.", - Rrtype: dns.TypeTXT, - Class: dns.ClassINET, - Ttl: 0, - }, - Txt: txtRR, - }, - }, - } -} - -func (s *DNSTestSuite) TestProxy_V2Success() { - - testCases := map[string]struct { - question string - configureRouter func(router *agentdns.MockDNSRouter) - clientQuery func(qR *pbdns.QueryRequest) - expectedErr error - }{ - - "happy path udp": { - question: "abc.com.", - configureRouter: func(router *agentdns.MockDNSRouter) { - router.On("HandleRequest", mock.Anything, mock.Anything, mock.Anything). - Return(basicResponse(), nil) - }, - clientQuery: func(qR *pbdns.QueryRequest) { - qR.Protocol = pbdns.Protocol_PROTOCOL_UDP - }, - }, - "happy path tcp": { - question: "abc.com.", - configureRouter: func(router *agentdns.MockDNSRouter) { - router.On("HandleRequest", mock.Anything, mock.Anything, mock.Anything). - Return(basicResponse(), nil) - }, - clientQuery: func(qR *pbdns.QueryRequest) { - qR.Protocol = pbdns.Protocol_PROTOCOL_TCP - }, - }, - "No protocol set": { - question: "abc.com.", - clientQuery: func(qR *pbdns.QueryRequest) {}, - expectedErr: errors.New("error protocol type not set: PROTOCOL_UNSET_UNSPECIFIED"), - }, - "Invalid question": { - question: "notvalid", - clientQuery: func(qR *pbdns.QueryRequest) { - qR.Protocol = pbdns.Protocol_PROTOCOL_UDP - }, - expectedErr: errors.New("failure decoding dns request"), - }, - } - - for name, tc := range testCases { - s.Run(name, func() { - router := agentdns.NewMockDNSRouter(s.T()) - - if tc.configureRouter != nil { - tc.configureRouter(router) - } - - server := NewServerV2(ConfigV2{ - Logger: hclog.Default(), - DNSRouter: router, - TokenFunc: func() string { return "" }, - }) - - client := testClient(s.T(), server) - - req := dns.Msg{} - req.SetQuestion(tc.question, dns.TypeA) - - bytes, _ := req.Pack() - - clientReq := &pbdns.QueryRequest{Msg: bytes} - tc.clientQuery(clientReq) - clientResp, err := client.Query(context.Background(), clientReq) - if tc.expectedErr != nil { - s.Require().Error(err, "no errror calling gRPC endpoint") - s.Require().ErrorContains(err, tc.expectedErr.Error()) - } else { - s.Require().NoError(err, "error calling gRPC endpoint") - - resp := clientResp.GetMsg() - var dnsResp dns.Msg - - err = dnsResp.Unpack(resp) - s.Require().NoError(err, "error unpacking dns response") - rr := dnsResp.Extra[0].(*dns.TXT) - s.Require().EqualValues(rr.Txt, txtRR) - } - }) - } -} diff --git a/agent/grpc-external/services/peerstream/health_snapshot.go b/agent/grpc-external/services/peerstream/health_snapshot.go index efd60a7f1039e..dd9a10c67469b 100644 --- a/agent/grpc-external/services/peerstream/health_snapshot.go +++ b/agent/grpc-external/services/peerstream/health_snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/health_snapshot_test.go b/agent/grpc-external/services/peerstream/health_snapshot_test.go index 6759db252d2cc..7ea404f3854c3 100644 --- a/agent/grpc-external/services/peerstream/health_snapshot_test.go +++ b/agent/grpc-external/services/peerstream/health_snapshot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/replication.go b/agent/grpc-external/services/peerstream/replication.go index 692a475235be0..a0c1e4387f1c6 100644 --- a/agent/grpc-external/services/peerstream/replication.go +++ b/agent/grpc-external/services/peerstream/replication.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index 6cd32c9287b9f..58e436bd1f5cd 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream @@ -83,8 +83,8 @@ func requireNotNil(v interface{}, name string) { var _ pbpeerstream.PeerStreamServiceServer = (*Server)(nil) -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbpeerstream.RegisterPeerStreamServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbpeerstream.RegisterPeerStreamServiceServer(grpcServer, s) } type Backend interface { diff --git a/agent/grpc-external/services/peerstream/server_test.go b/agent/grpc-external/services/peerstream/server_test.go index 836b09d89b2b1..cb7c60e3cf0a2 100644 --- a/agent/grpc-external/services/peerstream/server_test.go +++ b/agent/grpc-external/services/peerstream/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 9f2d9cf896f3d..61c98d3f07894 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 37e5e99a76f74..a314068ee9a71 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream @@ -690,7 +690,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { req := msg.GetRequest() require.NotNil(r, req) require.Equal(r, pbpeerstream.TypeURLExportedService, req.ResourceURL) - prototest.AssertDeepEqual(r, expectAck, msg) + prototest.AssertDeepEqual(t, expectAck, msg) }) expect := Status{ diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index c74a1b284f09e..abb5a003a3992 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index 33ea536469d8a..d676587a2520e 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_blocking.go b/agent/grpc-external/services/peerstream/subscription_blocking.go index a1257d3d33819..7fa8bc1eff59f 100644 --- a/agent/grpc-external/services/peerstream/subscription_blocking.go +++ b/agent/grpc-external/services/peerstream/subscription_blocking.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_manager.go b/agent/grpc-external/services/peerstream/subscription_manager.go index 92be1bb6b09de..4fcd27635b81d 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager.go +++ b/agent/grpc-external/services/peerstream/subscription_manager.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream @@ -882,10 +882,6 @@ func (m *subscriptionManager) subscribeServerAddrs( idx uint64, updateCh chan<- cache.UpdateEvent, ) (uint64, error) { - // TODO(inproc-grpc) - Look into using the insecure in-process gRPC Channel - // to get notified for server address updates instead of hooking into the - // subscription service. - // following code adapted from serverdiscovery/watch_servers.go sub, err := m.backend.Subscribe(&stream.SubscribeRequest{ Topic: autopilotevents.EventTopicReadyServers, diff --git a/agent/grpc-external/services/peerstream/subscription_manager_test.go b/agent/grpc-external/services/peerstream/subscription_manager_test.go index 2abd40be956ab..9e34756dbd4bc 100644 --- a/agent/grpc-external/services/peerstream/subscription_manager_test.go +++ b/agent/grpc-external/services/peerstream/subscription_manager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_state.go b/agent/grpc-external/services/peerstream/subscription_state.go index a1a370a3ec4c6..dba315370de7f 100644 --- a/agent/grpc-external/services/peerstream/subscription_state.go +++ b/agent/grpc-external/services/peerstream/subscription_state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_state_test.go b/agent/grpc-external/services/peerstream/subscription_state_test.go index cc3e49ab4c9cb..3cba66c9c2be4 100644 --- a/agent/grpc-external/services/peerstream/subscription_state_test.go +++ b/agent/grpc-external/services/peerstream/subscription_state_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_view.go b/agent/grpc-external/services/peerstream/subscription_view.go index 575729bc71df9..c85c82b15e5c8 100644 --- a/agent/grpc-external/services/peerstream/subscription_view.go +++ b/agent/grpc-external/services/peerstream/subscription_view.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/subscription_view_test.go b/agent/grpc-external/services/peerstream/subscription_view_test.go index cd2f61e60feb7..a51ca57e2902c 100644 --- a/agent/grpc-external/services/peerstream/subscription_view_test.go +++ b/agent/grpc-external/services/peerstream/subscription_view_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/peerstream/testing.go b/agent/grpc-external/services/peerstream/testing.go index b5e79a6347074..341885e985a9e 100644 --- a/agent/grpc-external/services/peerstream/testing.go +++ b/agent/grpc-external/services/peerstream/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peerstream diff --git a/agent/grpc-external/services/resource/delete.go b/agent/grpc-external/services/resource/delete.go index dbfdf07edb00f..b3045b3d6d294 100644 --- a/agent/grpc-external/services/resource/delete.go +++ b/agent/grpc-external/services/resource/delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" "time" "github.com/oklog/ulid/v2" @@ -19,113 +18,75 @@ import ( "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) -// Delete deletes a resource. +// Deletes a resource. // - To delete a resource regardless of the stored version, set Version = "" // - Supports deleting a resource by name, hence Id.Uid may be empty. // - Delete of a previously deleted or non-existent resource is a no-op to support idempotency. // - Errors with Aborted if the requested Version does not match the stored Version. 
// - Errors with PermissionDenied if ACL check fails -// - Errors with PermissionDenied if a license feature tied to the resource type is not allowed. func (s *Server) Delete(ctx context.Context, req *pbresource.DeleteRequest) (*pbresource.DeleteResponse, error) { - reg, err := s.ensureDeleteRequestValid(req) - if err != nil { + if err := validateDeleteRequest(req); err != nil { return nil, err } - entMeta := v2TenancyToV1EntMeta(req.Id.Tenancy) - authz, authzContext, err := s.getAuthorizer(tokenFromContext(ctx), entMeta) + reg, err := s.resolveType(req.Id.Type) if err != nil { return nil, err } - // Retrieve resource since ACL hook requires it. Furthermore, we'll need the - // read to be strongly consistent if the passed in Version or Uid are empty. - consistency := storage.EventualConsistency - if req.Version == "" || req.Id.Uid == "" { - consistency = storage.StrongConsistency + authz, err := s.getAuthorizer(tokenFromContext(ctx)) + if err != nil { + return nil, err } - // Apply defaults when tenancy units empty. - v1EntMetaToV2Tenancy(reg, entMeta, req.Id.Tenancy) - - // Only non-CAS deletes (version=="") are automatically retried. - err = s.retryCAS(ctx, req.Version, func() error { - existing, err := s.Backend.Read(ctx, consistency, req.Id) - switch { - case errors.Is(err, storage.ErrNotFound): - // Deletes are idempotent so no-op when not found - return nil - case err != nil: - return status.Errorf(codes.Internal, "failed read: %v", err) - } + err = reg.ACLs.Write(authz, req.Id) + switch { + case acl.IsErrPermissionDenied(err): + return nil, status.Error(codes.PermissionDenied, err.Error()) + case err != nil: + return nil, status.Errorf(codes.Internal, "failed write acl: %v", err) + } - // Check ACLs - err = reg.ACLs.Write(authz, authzContext, existing) + // The storage backend requires a Version and Uid to delete a resource based + // on CAS semantics. When either are not provided, the resource must be read + // with a strongly consistent read to retrieve either or both. + // + // n.b.: There is a chance DeleteCAS may fail with a storage.ErrCASFailure + // if an update occurs between the Read and DeleteCAS. Consider refactoring + // to use retryCAS() similar to the Write endpoint to close this gap. + deleteVersion := req.Version + deleteId := req.Id + if deleteVersion == "" || deleteId.Uid == "" { + existing, err := s.Backend.Read(ctx, storage.StrongConsistency, req.Id) switch { - case acl.IsErrPermissionDenied(err): - return status.Error(codes.PermissionDenied, err.Error()) - case err != nil: - return status.Errorf(codes.Internal, "failed write acl: %v", err) - } - - deleteVersion := req.Version - deleteId := req.Id - if deleteVersion == "" || deleteId.Uid == "" { + case err == nil: deleteVersion = existing.Version deleteId = existing.Id + case errors.Is(err, storage.ErrNotFound): + // Deletes are idempotent so no-op when not found + return &pbresource.DeleteResponse{}, nil + default: + return nil, status.Errorf(codes.Internal, "failed read: %v", err) } + } - // Check finalizers for a deferred delete - if resource.HasFinalizers(existing) { - if resource.IsMarkedForDeletion(existing) { - // Delete previously requested and finalizers still present so nothing to do - return nil - } - - // Mark for deletion and let controllers that put finalizers in place do their - // thing. Note we're passing in a clone of the recently read resource since - // we've not crossed a network/serialization boundary since the read and we - // don't want to mutate the in-mem reference. 
- _, err := s.markForDeletion(ctx, clone(existing)) - return err - } - - // Continue with an immediate delete - if err := s.maybeCreateTombstone(ctx, deleteId); err != nil { - return err - } - - err = s.Backend.DeleteCAS(ctx, deleteId, deleteVersion) - return err - }) + if err := s.maybeCreateTombstone(ctx, deleteId); err != nil { + return nil, err + } + err = s.Backend.DeleteCAS(ctx, deleteId, deleteVersion) switch { case err == nil: return &pbresource.DeleteResponse{}, nil case errors.Is(err, storage.ErrCASFailure): return nil, status.Error(codes.Aborted, err.Error()) - case isGRPCStatusError(err): - // Pass through gRPC errors from internal calls to resource service - // endpoints (e.g. Write when marking for deletion). - return nil, err default: return nil, status.Errorf(codes.Internal, "failed delete: %v", err) } } -func (s *Server) markForDeletion(ctx context.Context, res *pbresource.Resource) (*pbresource.DeleteResponse, error) { - // Write the deletion timestamp - res.Metadata[resource.DeletionTimestampKey] = time.Now().Format(time.RFC3339) - _, err := s.Write(ctx, &pbresource.WriteRequest{Resource: res}) - if err != nil { - return nil, err - } - return &pbresource.DeleteResponse{}, nil -} - // Create a tombstone to capture the intent to delete child resources. // Tombstones are created preemptively to prevent partial failures even though // we are currently unaware of the success/failure/no-op of DeleteCAS. In @@ -157,7 +118,7 @@ func (s *Server) maybeCreateTombstone(ctx context.Context, deleteId *pbresource. Id: &pbresource.ID{ Type: resource.TypeV1Tombstone, Tenancy: deleteId.Tenancy, - Name: TombstoneNameFor(deleteId), + Name: tombstoneName(deleteId), Uid: ulid.Make().String(), }, Generation: ulid.Make().String(), @@ -182,50 +143,20 @@ func (s *Server) maybeCreateTombstone(ctx context.Context, deleteId *pbresource. } } -func (s *Server) ensureDeleteRequestValid(req *pbresource.DeleteRequest) (*resource.Registration, error) { +func validateDeleteRequest(req *pbresource.DeleteRequest) error { if req.Id == nil { - return nil, status.Errorf(codes.InvalidArgument, "id is required") + return status.Errorf(codes.InvalidArgument, "id is required") } if err := validateId(req.Id, "id"); err != nil { - return nil, err - } - - reg, err := s.resolveType(req.Id.Type) - if err != nil { - return nil, err - } - - if err = s.FeatureCheck(reg); err != nil { - return nil, err - } - - if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil { - return nil, err - } - - if err := validateScopedTenancy(reg.Scope, reg.Type, req.Id.Tenancy, false); err != nil { - return nil, err - } - - if err := blockBuiltinsDeletion(reg.Type, req.Id); err != nil { - return nil, err + return err } - return reg, nil + return nil } // Maintains a deterministic mapping between a resource and it's tombstone's // name by embedding the resources's Uid in the name. 
-func TombstoneNameFor(deleteId *pbresource.ID) string { +func tombstoneName(deleteId *pbresource.ID) string { // deleteId.Name is just included for easier identification - return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, strings.ToLower(deleteId.Uid)) -} - -func blockDefaultNamespaceDeletion(rtype *pbresource.Type, id *pbresource.ID) error { - if id.Name == resource.DefaultNamespaceName && - id.Tenancy.Partition == resource.DefaultPartitionName && - resource.EqualType(rtype, pbtenancy.NamespaceType) { - return status.Errorf(codes.InvalidArgument, "cannot delete default namespace") - } - return nil + return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, deleteId.Uid) } diff --git a/agent/grpc-external/services/resource/delete_ce.go b/agent/grpc-external/services/resource/delete_ce.go deleted file mode 100644 index d2ff805a24a47..0000000000000 --- a/agent/grpc-external/services/resource/delete_ce.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package resource - -import "github.com/hashicorp/consul/proto-public/pbresource" - -func blockBuiltinsDeletion(rtype *pbresource.Type, id *pbresource.ID) error { - if err := blockDefaultNamespaceDeletion(rtype, id); err != nil { - return err - } - return nil -} diff --git a/agent/grpc-external/services/resource/delete_test.go b/agent/grpc-external/services/resource/delete_test.go index 76403bb4d6baa..0e98d3fd57a7a 100644 --- a/agent/grpc-external/services/resource/delete_test.go +++ b/agent/grpc-external/services/resource/delete_test.go @@ -1,173 +1,75 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" - "fmt" - "strings" "testing" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" - pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" ) func TestDelete_InputValidation(t *testing.T) { - type testCase struct { - modFn func(artistId, recordLabelId, executiveId *pbresource.ID) *pbresource.ID - errContains string - } - - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc testCase) { - executive, err := demo.GenerateV1Executive("marvin", "CEO") - require.NoError(t, err) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + server := testServer(t) + client := testClient(t, server) - req := &pbresource.DeleteRequest{Id: tc.modFn(artist.Id, recordLabel.Id, executive.Id), Version: ""} - _, err = client.Delete(context.Background(), req) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) - } + demo.RegisterTypes(server.Registry) - testCases := map[string]testCase{ - 
"no id": { - modFn: func(_, _, _ *pbresource.ID) *pbresource.ID { - return nil - }, - errContains: "id is required", + testCases := map[string]func(*pbresource.DeleteRequest){ + "no id": func(req *pbresource.DeleteRequest) { req.Id = nil }, + "no type": func(req *pbresource.DeleteRequest) { req.Id.Type = nil }, + "no tenancy": func(req *pbresource.DeleteRequest) { req.Id.Tenancy = nil }, + "no name": func(req *pbresource.DeleteRequest) { req.Id.Name = "" }, + // clone necessary to not pollute DefaultTenancy + "tenancy partition not default": func(req *pbresource.DeleteRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.Partition = "" }, - "no type": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId - }, - errContains: "id.type is required", + "tenancy namespace not default": func(req *pbresource.DeleteRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.Namespace = "" }, - "no name": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId - }, - errContains: "id.name invalid", - }, - "mixed case name": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "DepecheMode" - return artistId - }, - errContains: "id.name invalid", - }, - "name too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = strings.Repeat("n", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.name invalid", - }, - "partition mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = "Default" - return artistId - }, - errContains: "id.tenancy.partition invalid", - }, - "partition name too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.tenancy.partition invalid", - }, - "namespace mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = "Default" - return artistId - }, - errContains: "id.tenancy.namespace invalid", - }, - "namespace name too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.tenancy.namespace invalid", - }, - "partition scoped resource with namespace": { - modFn: func(_, recordLabelId, _ *pbresource.ID) *pbresource.ID { - recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId - }, - errContains: "cannot have a namespace", - }, - "cluster scoped resource with partition": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy.Partition = "ishouldnothaveapartition" - executiveId.Tenancy.Namespace = "" - return executiveId - }, - errContains: "cannot have a partition", - }, - "cluster scoped resource with namespace": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy.Partition = "" - executiveId.Tenancy.Namespace = "ishouldnothaveanamespace" - return executiveId - }, - errContains: "cannot have a namespace", + "tenancy peername not local": func(req *pbresource.DeleteRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.PeerName = "" }, } + for desc, modFn := range testCases { + t.Run(desc, func(t *testing.T) { + res, err := demo.GenerateV2Artist() + require.NoError(t, err) + + req := 
&pbresource.DeleteRequest{Id: res.Id, Version: ""} + modFn(req) - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) - }) - } + _, err = client.Delete(testContext(t), req) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) }) } } func TestDelete_TypeNotRegistered(t *testing.T) { - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder().WithV2Tenancy(useV2Tenancy).Run(t) + t.Parallel() - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + _, client, ctx := testDeps(t) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) - // delete artist with unregistered type - _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, "not registered") - }) - } + // delete artist with unregistered type + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) } func TestDelete_ACLs(t *testing.T) { @@ -180,7 +82,7 @@ func TestDelete_ACLs(t *testing.T) { authz: AuthorizerFrom(t, demo.ArtistV1WritePolicy), assertErrFn: func(err error) { require.Error(t, err) - require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String(), err) + require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) }, }, "delete allowed": { @@ -193,24 +95,23 @@ func TestDelete_ACLs(t *testing.T) { for desc, tc := range testcases { t.Run(desc, func(t *testing.T) { - builder := svctest.NewResourceServiceBuilder().WithRegisterFns(demo.RegisterTypes) - client := builder.Run(t) + server := testServer(t) + client := testClient(t, server) + + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(tc.authz, nil) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - // Write test resource to delete. - rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) + artist, err = server.Backend.WriteCAS(context.Background(), artist) require.NoError(t, err) - // Mock is put in place after the above "write" since the "write" must also pass the ACL check. - mockACLResolver := &svc.MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(tc.authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver - - // Exercise ACL. 
- _, err = client.Delete(testContext(t), &pbresource.DeleteRequest{Id: rsp.Resource.Id}) + // exercise ACL + _, err = client.Delete(testContext(t), &pbresource.DeleteRequest{Id: artist.Id}) tc.assertErrFn(err) }) } @@ -219,307 +120,116 @@ func TestDelete_ACLs(t *testing.T) { func TestDelete_Success(t *testing.T) { t.Parallel() - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc deleteTestCase, modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID) { - ctx := context.Background() - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - writeRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - recordLabel = writeRsp.Resource - originalRecordLabelId := clone(recordLabel.Id) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - writeRsp, err = client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - artist = writeRsp.Resource - originalArtistId := clone(artist.Id) - - // Pick the resource to be deleted based on type's scope and mod tenancy - // based on the tenancy test case. - deleteId := modFn(artist.Id, recordLabel.Id) - deleteReq := tc.deleteReqFn(recordLabel) - if proto.Equal(deleteId.Type, demo.TypeV2Artist) { - deleteReq = tc.deleteReqFn(artist) - } - - // Delete - _, err = client.Delete(ctx, deleteReq) - require.NoError(t, err) - - // Verify deleted - _, err = client.Read(ctx, &pbresource.ReadRequest{Id: deleteId}) - require.Error(t, err) - require.Equal(t, codes.NotFound.String(), status.Code(err).String()) - - // Derive tombstone name from resource that was deleted. - tname := svc.TombstoneNameFor(originalRecordLabelId) - if proto.Equal(deleteId.Type, demo.TypeV2Artist) { - tname = svc.TombstoneNameFor(originalArtistId) - } - - // Verify tombstone created - _, err = client.Read(ctx, &pbresource.ReadRequest{ - Id: &pbresource.ID{ - Name: tname, - Type: resource.TypeV1Tombstone, - Tenancy: deleteReq.Id.Tenancy, - }, - }) - require.NoError(t, err, "expected tombstone to be found") - } - for desc, tc := range deleteTestCases() { t.Run(desc, func(t *testing.T) { - for tenancyDesc, modFn := range tenancyCases() { - t.Run(tenancyDesc, func(t *testing.T) { - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - run(t, client, tc, modFn) - }) - } - }) - } - }) - } -} - -func TestDelete_NonCAS_Retry(t *testing.T) { - server := testServer(t) - client := testClient(t, server) - demo.RegisterTypes(server.Registry) - - res, err := demo.GenerateV2Artist() - require.NoError(t, err) - - rsp1, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) - require.NoError(t, err) - - // Simulate conflicting versions by blocking the RPC after it has read the - // current version of the resource, but before it tries to do a CAS delete - // based on that version. 
- backend := &blockOnceBackend{ - Backend: server.Backend, - - readCompletedCh: make(chan struct{}), - blockCh: make(chan struct{}), - } - server.Backend = backend - - deleteResultCh := make(chan error) - go func() { - _, err := client.Delete(testContext(t), &pbresource.DeleteRequest{Id: rsp1.Resource.Id, Version: ""}) - deleteResultCh <- err - }() - - // Wait for the read, to ensure the Delete in the goroutine above has read the - // current version of the resource. - <-backend.readCompletedCh - - // Update the artist so that its version is different from the version read by Delete - res = modifyArtist(t, rsp1.Resource) - _, err = backend.WriteCAS(testContext(t), res) - require.NoError(t, err) - - // Unblock the Delete by allowing the backend read to return and attempt a CAS delete. - // The CAS delete should fail once, and they retry the backend read/delete cycle again - // successfully. - close(backend.blockCh) - - // Check that the delete succeeded anyway because of a retry. - require.NoError(t, <-deleteResultCh) -} - -func TestDelete_TombstoneDeletionDoesNotCreateNewTombstone(t *testing.T) { - t.Parallel() - - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - + server, client, ctx := testDeps(t) + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) require.NoError(t, err) + artistId := clone(rsp.Resource.Id) artist = rsp.Resource - // delete artist - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) + // delete + _, err = client.Delete(ctx, tc.deleteReqFn(artist)) require.NoError(t, err) - // verify artist's tombstone created - rsp2, err := client.Read(ctx, &pbresource.ReadRequest{ + // verify deleted + _, err = server.Backend.Read(ctx, storage.StrongConsistency, artistId) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrNotFound) + + // verify tombstone created + _, err = client.Read(ctx, &pbresource.ReadRequest{ Id: &pbresource.ID{ - Name: svc.TombstoneNameFor(artist.Id), + Name: tombstoneName(artistId), Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy, }, }) require.NoError(t, err) - tombstone := rsp2.Resource - - // delete artist's tombstone - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version}) - require.NoError(t, err) - - // verify no new tombstones created and artist's existing tombstone deleted - rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy}) - require.NoError(t, err) - require.Empty(t, rsp3.Resources) }) } } -func TestDelete_NotFound(t *testing.T) { +func TestDelete_TombstoneDeletionDoesNotCreateNewTombstone(t *testing.T) { t.Parallel() - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc deleteTestCase) { - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + server, client, ctx := testDeps(t) + demo.RegisterTypes(server.Registry) - // verify delete of non-existant or already deleted resource is a no-op - _, err = client.Delete(context.Background(), tc.deleteReqFn(artist)) - require.NoError(t, err) - } + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) - for _, useV2Tenancy := range []bool{false, true} { - 
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - for desc, tc := range deleteTestCases() { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) - }) - } - }) - } -} + rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) + require.NoError(t, err) + artist = rsp.Resource -func TestDelete_VersionMismatch(t *testing.T) { - t.Parallel() + // delete artist + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) + require.NoError(t, err) - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) + // verify artist's tombstone created + rsp2, err := client.Read(ctx, &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: tombstoneName(artist.Id), + Type: resource.TypeV1Tombstone, + Tenancy: artist.Id.Tenancy, + }, + }) + require.NoError(t, err) + tombstone := rsp2.Resource - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) + // delete artist's tombstone + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version}) + require.NoError(t, err) - // delete with a version that is different from the stored version - _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"}) - require.Error(t, err) - require.Equal(t, codes.Aborted.String(), status.Code(err).String()) - require.ErrorContains(t, err, "CAS operation failed") - }) - } + // verify no new tombstones created and artist's existing tombstone deleted + rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy}) + require.NoError(t, err) + require.Empty(t, rsp3.Resources) } -func TestDelete_MarkedForDeletionWhenFinalizersPresent(t *testing.T) { - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a resource with a finalizer - res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). - WithTenancy(resource.DefaultClusteredTenancy()). - WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). - WithMeta(resource.FinalizerKey, "finalizer1"). 
- Write(t, client) - - // Delete it - _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) - - // Verify resource has been marked for deletion - rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - require.True(t, resource.IsMarkedForDeletion(rsp.Resource)) +func TestDelete_NotFound(t *testing.T) { + t.Parallel() - // Delete again - should be no-op - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) + for desc, tc := range deleteTestCases() { + t.Run(desc, func(t *testing.T) { + server, client, ctx := testDeps(t) + demo.RegisterTypes(server.Registry) + artist, err := demo.GenerateV2Artist() require.NoError(t, err) - // Verify no-op by checking version still the same - rsp2, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) + // verify delete of non-existant or already deleted resource is a no-op + _, err = client.Delete(ctx, tc.deleteReqFn(artist)) require.NoError(t, err) - rtest.RequireVersionUnchanged(t, rsp2.Resource, rsp.Resource.Version) }) } } -func TestDelete_ImmediatelyDeletedAfterFinalizersRemoved(t *testing.T) { - for _, useV2Tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(useV2Tenancy). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a resource with a finalizer - res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). - WithTenancy(resource.DefaultClusteredTenancy()). - WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). - WithMeta(resource.FinalizerKey, "finalizer1"). - Write(t, client) - - // Delete should mark it for deletion - _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) - - // Remove the finalizer - rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - resource.RemoveFinalizer(rsp.Resource, "finalizer1") - _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: rsp.Resource}) - require.NoError(t, err) +func TestDelete_VersionMismatch(t *testing.T) { + t.Parallel() - // Delete should be immediate - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id}) - require.NoError(t, err) + server, client, ctx := testDeps(t) + demo.RegisterTypes(server.Registry) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) + require.NoError(t, err) - // Verify deleted - _, err = client.Read(ctx, &pbresource.ReadRequest{Id: rsp.Resource.Id}) - require.Error(t, err) - require.Equal(t, codes.NotFound.String(), status.Code(err).String()) - }) - } + // delete with a version that is different from the stored version + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"}) + require.Error(t, err) + require.Equal(t, codes.Aborted.String(), status.Code(err).String()) + require.ErrorContains(t, err, "CAS operation failed") } -func TestDelete_BlockDeleteDefaultNamespace(t *testing.T) { - client := svctest.NewResourceServiceBuilder().WithV2Tenancy(true).Run(t) - - id := &pbresource.ID{ - Name: resource.DefaultNamespaceName, - Type: pbtenancy.NamespaceType, - Tenancy: &pbresource.Tenancy{Partition: resource.DefaultPartitionName}, - } - _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: id}) - require.Error(t, err) - require.Equal(t, 
codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, "cannot delete default namespace") +func testDeps(t *testing.T) (*Server, pbresource.ResourceServiceClient, context.Context) { + server := testServer(t) + client := testClient(t, server) + return server, client, context.Background() } type deleteTestCase struct { diff --git a/agent/grpc-external/services/resource/list.go b/agent/grpc-external/services/resource/list.go index 62ec2d7975bb6..77269e74688ff 100644 --- a/agent/grpc-external/services/resource/list.go +++ b/agent/grpc-external/services/resource/list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -10,27 +10,28 @@ import ( "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" ) func (s *Server) List(ctx context.Context, req *pbresource.ListRequest) (*pbresource.ListResponse, error) { - reg, err := s.ensureListRequestValid(req) + if err := validateListRequest(req); err != nil { + return nil, err + } + + // check type + reg, err := s.resolveType(req.Type) if err != nil { return nil, err } - // v1 ACL subsystem is "wildcard" aware so just pass on through. - entMeta := v2TenancyToV1EntMeta(req.Tenancy) - token := tokenFromContext(ctx) - authz, authzContext, err := s.getAuthorizer(token, entMeta) + authz, err := s.getAuthorizer(tokenFromContext(ctx)) if err != nil { return nil, err } - // Check ACLs. - err = reg.ACLs.List(authz, authzContext) + // check acls + err = reg.ACLs.List(authz, req.Tenancy) switch { case acl.IsErrPermissionDenied(err): return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -38,9 +39,6 @@ func (s *Server) List(ctx context.Context, req *pbresource.ListRequest) (*pbreso return nil, status.Errorf(codes.Internal, "failed list acl: %v", err) } - // Ensure we're defaulting correctly when request tenancy units are empty. - v1EntMetaToV2Tenancy(reg, entMeta, req.Tenancy) - resources, err := s.Backend.List( ctx, readConsistencyFrom(ctx), @@ -54,22 +52,13 @@ func (s *Server) List(ctx context.Context, req *pbresource.ListRequest) (*pbreso result := make([]*pbresource.Resource, 0) for _, resource := range resources { - // Filter out non-matching GroupVersion. + // filter out non-matching GroupVersion if resource.Id.Type.GroupVersion != req.Type.GroupVersion { continue } - // Need to rebuild authorizer per resource since wildcard inputs may - // result in different tenancies. Consider caching per tenancy if this - // is deemed expensive. - entMeta = v2TenancyToV1EntMeta(resource.Id.Tenancy) - authz, authzContext, err = s.getAuthorizer(token, entMeta) - if err != nil { - return nil, err - } - - // Filter out items that don't pass read ACLs. 
- err = reg.ACLs.Read(authz, authzContext, resource.Id, resource) + // filter out items that don't pass read ACLs + err = reg.ACLs.Read(authz, resource.Id) switch { case acl.IsErrPermissionDenied(err): continue @@ -81,46 +70,15 @@ func (s *Server) List(ctx context.Context, req *pbresource.ListRequest) (*pbreso return &pbresource.ListResponse{Resources: result}, nil } -func (s *Server) ensureListRequestValid(req *pbresource.ListRequest) (*resource.Registration, error) { +func validateListRequest(req *pbresource.ListRequest) error { var field string switch { case req.Type == nil: field = "type" case req.Tenancy == nil: field = "tenancy" + default: + return nil } - - if field != "" { - return nil, status.Errorf(codes.InvalidArgument, "%s is required", field) - } - - // Check type exists. - reg, err := s.resolveType(req.Type) - if err != nil { - return nil, err - } - - // Ignore return value since read ops are allowed but will log a warning if the feature is - // not enabled in the license. - _ = s.FeatureCheck(reg) - - if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil { - return nil, err - } - - if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { - return nil, err - } - - // Error when partition scoped and namespace not empty. - if reg.Scope == resource.ScopePartition && req.Tenancy.Namespace != "" && req.Tenancy.Namespace != storage.Wildcard { - return nil, status.Errorf( - codes.InvalidArgument, - "partition scoped type %s cannot have a namespace. got: %s", - resource.ToGVK(req.Type), - req.Tenancy.Namespace, - ) - } - - return reg, nil + return status.Errorf(codes.InvalidArgument, "%s is required", field) } diff --git a/agent/grpc-external/services/resource/list_by_owner.go b/agent/grpc-external/services/resource/list_by_owner.go index bb1868a620385..2cc203e72c30b 100644 --- a/agent/grpc-external/services/resource/list_by_owner.go +++ b/agent/grpc-external/services/resource/list_by_owner.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -10,66 +10,38 @@ import ( "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" ) func (s *Server) ListByOwner(ctx context.Context, req *pbresource.ListByOwnerRequest) (*pbresource.ListByOwnerResponse, error) { - reg, err := s.ensureListByOwnerRequestValid(req) - if err != nil { + if err := validateListByOwnerRequest(req); err != nil { return nil, err } - // Convert v2 request tenancy to v1 for ACL subsystem. - entMeta := v2TenancyToV1EntMeta(req.Owner.Tenancy) - token := tokenFromContext(ctx) - - // Fill entMeta with token tenancy when empty. - authz, authzContext, err := s.getAuthorizer(token, entMeta) + _, err := s.resolveType(req.Owner.Type) if err != nil { return nil, err } - // Handle defaulting empty tenancy units from request. - v1EntMetaToV2Tenancy(reg, entMeta, req.Owner.Tenancy) - - // Check list ACL before verifying tenancy exists to not leak tenancy existence. - err = reg.ACLs.List(authz, authzContext) - switch { - case acl.IsErrPermissionDenied(err): - return nil, status.Error(codes.PermissionDenied, err.Error()) - case err != nil: - return nil, status.Errorf(codes.Internal, "failed list acl: %v", err) - } - - // Get owned resources. 
children, err := s.Backend.ListByOwner(ctx, req.Owner) if err != nil { return nil, status.Errorf(codes.Internal, "failed list by owner: %v", err) } + authz, err := s.getAuthorizer(tokenFromContext(ctx)) + if err != nil { + return nil, err + } + result := make([]*pbresource.Resource, 0) for _, child := range children { - // Retrieve child type's registration to access read ACL hook. - childReg, err := s.resolveType(child.Id.Type) + reg, err := s.resolveType(child.Id.Type) if err != nil { return nil, err } - // Rebuild authorizer if tenancy not identical between owner and child (child scope - // may be narrower). - childAuthz := authz - childAuthzContext := authzContext - if !resource.EqualTenancy(req.Owner.Tenancy, child.Id.Tenancy) { - childEntMeta := v2TenancyToV1EntMeta(child.Id.Tenancy) - childAuthz, childAuthzContext, err = s.getAuthorizer(token, childEntMeta) - if err != nil { - return nil, err - } - } - - // Filter out children that fail real ACL. - err = childReg.ACLs.Read(childAuthz, childAuthzContext, child.Id, child) + // ACL filter + err = reg.ACLs.Read(authz, child.Id) switch { case acl.IsErrPermissionDenied(err): continue @@ -82,31 +54,17 @@ func (s *Server) ListByOwner(ctx context.Context, req *pbresource.ListByOwnerReq return &pbresource.ListByOwnerResponse{Resources: result}, nil } -func (s *Server) ensureListByOwnerRequestValid(req *pbresource.ListByOwnerRequest) (*resource.Registration, error) { +func validateListByOwnerRequest(req *pbresource.ListByOwnerRequest) error { if req.Owner == nil { - return nil, status.Errorf(codes.InvalidArgument, "owner is required") + return status.Errorf(codes.InvalidArgument, "owner is required") } if err := validateId(req.Owner, "owner"); err != nil { - return nil, err + return err } if req.Owner.Uid == "" { - return nil, status.Errorf(codes.InvalidArgument, "owner uid is required") + return status.Errorf(codes.InvalidArgument, "owner uid is required") } - - reg, err := s.resolveType(req.Owner.Type) - if err != nil { - return nil, err - } - - if err = checkV2Tenancy(s.UseV2Tenancy, req.Owner.Type); err != nil { - return nil, err - } - - if err = validateScopedTenancy(reg.Scope, reg.Type, req.Owner.Tenancy, true); err != nil { - return nil, err - } - - return reg, nil + return nil } diff --git a/agent/grpc-external/services/resource/list_by_owner_test.go b/agent/grpc-external/services/resource/list_by_owner_test.go index 92167042ea154..218971a050daa 100644 --- a/agent/grpc-external/services/resource/list_by_owner_test.go +++ b/agent/grpc-external/services/resource/list_by_owner_test.go @@ -1,167 +1,73 @@ // // Copyright (c) HashiCorp, Inc. 
-// // SPDX-License-Identifier: BUSL-1.1 +// // SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" "fmt" - "strings" "testing" - "github.com/oklog/ulid/v2" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "github.com/hashicorp/consul/acl" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" - "github.com/hashicorp/consul/internal/resource/resourcetest" "github.com/hashicorp/consul/proto-public/pbresource" - pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" "github.com/hashicorp/consul/proto/private/prototest" -) -// TODO: Update all tests to use true/false table test for v2tenancy + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) func TestListByOwner_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - type testCase struct { - modFn func(artistId, recordlabelId, executiveId *pbresource.ID) *pbresource.ID - errContains string - } - testCases := map[string]testCase{ - "no owner": { - modFn: func(_, _, _ *pbresource.ID) *pbresource.ID { - return nil - }, - errContains: "owner is required", - }, - "no type": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId - }, - errContains: "owner.type is required", - }, - "no name": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId - }, - errContains: "owner.name invalid", - }, - "name mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "U2" - return artistId - }, - errContains: "owner.name invalid", - }, - "name too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = strings.Repeat("n", resource.MaxNameLength+1) - return artistId - }, - errContains: "owner.name invalid", - }, - "partition mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = "Default" - return artistId - }, - errContains: "owner.tenancy.partition invalid", - }, - "partition too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - return artistId - }, - errContains: "owner.tenancy.partition invalid", - }, - "namespace mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = "Default" - return artistId - }, - errContains: "owner.tenancy.namespace invalid", - }, - "namespace too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - return artistId - }, - errContains: "owner.tenancy.namespace invalid", - }, - "no uid": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Uid = "" - return artistId - }, - errContains: "owner uid is required", - }, - "partition scope with non-empty namespace": { - modFn: func(_, recordLabelId, _ *pbresource.ID) *pbresource.ID { - recordLabelId.Uid = ulid.Make().String() - 
recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId - }, - errContains: "cannot have a namespace", - }, - "cluster scope with non-empty partition": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Uid = ulid.Make().String() - executiveId.Tenancy.Partition = "ishouldnothaveapartition" - return executiveId - }, - errContains: "cannot have a partition", - }, - "cluster scope with non-empty namespace": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Uid = ulid.Make().String() - executiveId.Tenancy.Namespace = "ishouldnothaveanamespace" - return executiveId - }, - errContains: "cannot have a namespace", + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) + + testCases := map[string]func(*pbresource.ListByOwnerRequest){ + "no owner": func(req *pbresource.ListByOwnerRequest) { req.Owner = nil }, + "no type": func(req *pbresource.ListByOwnerRequest) { req.Owner.Type = nil }, + "no tenancy": func(req *pbresource.ListByOwnerRequest) { req.Owner.Tenancy = nil }, + "no name": func(req *pbresource.ListByOwnerRequest) { req.Owner.Name = "" }, + "no uid": func(req *pbresource.ListByOwnerRequest) { req.Owner.Uid = "" }, + // clone necessary to not pollute DefaultTenancy + "tenancy partition not default": func(req *pbresource.ListByOwnerRequest) { + req.Owner.Tenancy = clone(req.Owner.Tenancy) + req.Owner.Tenancy.Partition = "" + }, + "tenancy namespace not default": func(req *pbresource.ListByOwnerRequest) { + req.Owner.Tenancy = clone(req.Owner.Tenancy) + req.Owner.Tenancy.Namespace = "" + }, + "tenancy peername not local": func(req *pbresource.ListByOwnerRequest) { + req.Owner.Tenancy = clone(req.Owner.Tenancy) + req.Owner.Tenancy.PeerName = "" }, } - for desc, tc := range testCases { + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { - artist, err := demo.GenerateV2Artist() + res, err := demo.GenerateV2Artist() require.NoError(t, err) - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - executive, err := demo.GenerateV1Executive("marvin", "CEO") - require.NoError(t, err) - - // Each test case picks which resource to use based on the resource type's scope. - req := &pbresource.ListByOwnerRequest{Owner: tc.modFn(artist.Id, recordLabel.Id, executive.Id)} + req := &pbresource.ListByOwnerRequest{Owner: res.Id} + modFn(req) _, err = client.ListByOwner(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } func TestListByOwner_TypeNotRegistered(t *testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + server := testServer(t) + client := testClient(t, server) _, err := client.ListByOwner(context.Background(), &pbresource.ListByOwnerRequest{ Owner: &pbresource.ID{ Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, Uid: "bogus", Name: "bogus", }, @@ -172,9 +78,9 @@ func TestListByOwner_TypeNotRegistered(t *testing.T) { } func TestListByOwner_Empty(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -188,9 +94,9 @@ func TestListByOwner_Empty(t *testing.T) { } func TestListByOwner_Many(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -219,114 +125,8 @@ func TestListByOwner_Many(t *testing.T) { prototest.AssertElementsMatch(t, albums, rsp3.Resources) } -func TestListByOwner_OwnerTenancyDoesNotExist(t *testing.T) { - type testCase struct { - modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID - } - tenancyCases := map[string]testCase{ - "namespace scoped owner with non-existent partition": { - modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = "boguspartition" - return id - }, - }, - "namespace scoped owner with non-existent namespace": { - modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Namespace = "bogusnamespace" - return id - }, - }, - "partition scoped owner with non-existent partition": { - modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy.Partition = "boguspartition" - return id - }, - }, - } - for desc, tc := range tenancyCases { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - recordLabel := resourcetest.Resource(demo.TypeV1RecordLabel, "looney-tunes"). - WithTenancy(resource.DefaultPartitionedTenancy()). - WithData(t, &pbdemo.RecordLabel{Name: "Looney Tunes"}). - Write(t, client) - - artist := resourcetest.Resource(demo.TypeV1Artist, "blur"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemo.Artist{Name: "Blur"}). - WithOwner(recordLabel.Id). - Write(t, client) - - // Verify non-existant tenancy units in owner return empty list. - rsp, err := client.ListByOwner(testContext(t), &pbresource.ListByOwnerRequest{Owner: tc.modFn(artist.Id, recordLabel.Id)}) - require.NoError(t, err) - require.Empty(t, rsp.Resources) - }) - } -} - -func TestListByOwner_Tenancy_Defaults_And_Normalization(t *testing.T) { - for tenancyDesc, modFn := range tenancyCases() { - t.Run(tenancyDesc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create partition scoped recordLabel. - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - rsp1, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - recordLabel = rsp1.Resource - - // Create namespace scoped artist. - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - rsp2, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - artist = rsp2.Resource - - // Owner will be either partition scoped (recordLabel) or namespace scoped (artist) based on testcase. - moddedOwnerId := modFn(artist.Id, recordLabel.Id) - var ownerId *pbresource.ID - - // Avoid using the modded id when linking owner to child. 
- switch { - case proto.Equal(moddedOwnerId.Type, demo.TypeV2Artist): - ownerId = artist.Id - case proto.Equal(moddedOwnerId.Type, demo.TypeV1RecordLabel): - ownerId = recordLabel.Id - default: - require.Fail(t, "unexpected resource type") - } - - // Link owner to child. - album, err := demo.GenerateV2Album(ownerId) - require.NoError(t, err) - rsp3, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: album}) - require.NoError(t, err) - album = rsp3.Resource - - // Test - listRsp, err := client.ListByOwner(testContext(t), &pbresource.ListByOwnerRequest{ - Owner: moddedOwnerId, - }) - require.NoError(t, err) - - // Verify child album always returned. - prototest.AssertDeepEqual(t, album, listRsp.Resources[0]) - }) - } -} - func TestListByOwner_ACL_PerTypeDenied(t *testing.T) { - authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "deny" }`, demo.ArtistV2ListPolicy) + authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "deny" }`) _, rsp, err := roundTripListByOwner(t, authz) // verify resource filtered out, hence no results @@ -335,7 +135,7 @@ func TestListByOwner_ACL_PerTypeDenied(t *testing.T) { } func TestListByOwner_ACL_PerTypeAllowed(t *testing.T) { - authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "read" }`, demo.ArtistV2ListPolicy) + authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "read" }`) album, rsp, err := roundTripListByOwner(t, authz) // verify resource not filtered out @@ -346,9 +146,9 @@ func TestListByOwner_ACL_PerTypeAllowed(t *testing.T) { // roundtrip a ListByOwner which attempts to return a single resource func roundTripListByOwner(t *testing.T, authz acl.Authorizer) (*pbresource.Resource, *pbresource.ListByOwnerResponse, error) { - builder := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes) - client := builder.Run(t) + server := testServer(t) + client := testClient(t, server) + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -364,11 +164,10 @@ func roundTripListByOwner(t *testing.T, authz acl.Authorizer) (*pbresource.Resou album = rsp2.Resource require.NoError(t, err) - // Mock has to be put in place after the above writes so writes will succeed. - mockACLResolver := &svc.MockACLResolver{} + mockACLResolver := &MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). Return(authz, nil) - builder.ServiceImpl().ACLResolver = mockACLResolver + server.ACLResolver = mockACLResolver rsp3, err := client.ListByOwner(testContext(t), &pbresource.ListByOwnerRequest{Owner: artist.Id}) return album, rsp3, err diff --git a/agent/grpc-external/services/resource/list_test.go b/agent/grpc-external/services/resource/list_test.go index efcfa3cafd8c4..4d6b50951b758 100644 --- a/agent/grpc-external/services/resource/list_test.go +++ b/agent/grpc-external/services/resource/list_test.go @@ -1,107 +1,59 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" "fmt" - "strconv" - "strings" "testing" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "github.com/hashicorp/consul/acl" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/agent/grpc-external/testutils" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" -) -// TODO: Update all tests to use true/false table test for v2tenancy + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) func TestList_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) - type testCase struct { - modReqFn func(req *pbresource.ListRequest) - errContains string - } + demo.RegisterTypes(server.Registry) - testCases := map[string]testCase{ - "no type": { - modReqFn: func(req *pbresource.ListRequest) { req.Type = nil }, - errContains: "type is required", - }, - "no tenancy": { - modReqFn: func(req *pbresource.ListRequest) { req.Tenancy = nil }, - errContains: "tenancy is required", - }, - "partition mixed case": { - modReqFn: func(req *pbresource.ListRequest) { req.Tenancy.Partition = "Default" }, - errContains: "tenancy.partition invalid", - }, - "partition too long": { - modReqFn: func(req *pbresource.ListRequest) { - req.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - }, - errContains: "tenancy.partition invalid", - }, - "namespace mixed case": { - modReqFn: func(req *pbresource.ListRequest) { req.Tenancy.Namespace = "Default" }, - errContains: "tenancy.namespace invalid", - }, - "namespace too long": { - modReqFn: func(req *pbresource.ListRequest) { - req.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - }, - errContains: "tenancy.namespace invalid", - }, - "name_prefix mixed case": { - modReqFn: func(req *pbresource.ListRequest) { req.NamePrefix = "Violator" }, - errContains: "name_prefix invalid", - }, - "partitioned resource provides non-empty namespace": { - modReqFn: func(req *pbresource.ListRequest) { - req.Type = demo.TypeV1RecordLabel - req.Tenancy.Namespace = "bad" - }, - errContains: "cannot have a namespace", - }, + testCases := map[string]func(*pbresource.ListRequest){ + "no type": func(req *pbresource.ListRequest) { req.Type = nil }, + "no tenancy": func(req *pbresource.ListRequest) { req.Tenancy = nil }, } - for desc, tc := range testCases { + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { req := &pbresource.ListRequest{ Type: demo.TypeV2Album, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, } - tc.modReqFn(req) + modFn(req) _, err := client.List(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } func TestList_TypeNotFound(t 
*testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + server := testServer(t) + client := testClient(t, server) _, err := client.List(context.Background(), &pbresource.ListRequest{ Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.Error(t, err) @@ -112,13 +64,13 @@ func TestList_TypeNotFound(t *testing.T) { func TestList_Empty(t *testing.T) { for desc, tc := range listTestCases() { t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) rsp, err := client.List(tc.ctx, &pbresource.ListRequest{ Type: demo.TypeV1Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.NoError(t, err) @@ -130,9 +82,9 @@ func TestList_Empty(t *testing.T) { func TestList_Many(t *testing.T) { for desc, tc := range listTestCases() { t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) resources := make([]*pbresource.Resource, 10) for i := 0; i < len(resources); i++ { @@ -150,7 +102,7 @@ func TestList_Many(t *testing.T) { rsp, err := client.List(tc.ctx, &pbresource.ListRequest{ Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.NoError(t, err) @@ -159,103 +111,17 @@ func TestList_Many(t *testing.T) { } } -func TestList_NamePrefix(t *testing.T) { - for desc, tc := range listTestCases() { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - expectedResources := []*pbresource.Resource{} - - namePrefixIndex := 0 - // create a name prefix that is always present - namePrefix := fmt.Sprintf("%s-", strconv.Itoa(namePrefixIndex)) - for i := 0; i < 10; i++ { - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - // Prevent test flakes if the generated names collide. - artist.Id.Name = fmt.Sprintf("%d-%s", i, artist.Id.Name) - - rsp, err := client.Write(tc.ctx, &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - - // only matching name prefix are expected - if i == namePrefixIndex { - expectedResources = append(expectedResources, rsp.Resource) - } - } - - rsp, err := client.List(tc.ctx, &pbresource.ListRequest{ - Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), - NamePrefix: namePrefix, - }) - - require.NoError(t, err) - prototest.AssertElementsMatch(t, expectedResources, rsp.Resources) - }) - } -} - -func TestList_Tenancy_Defaults_And_Normalization(t *testing.T) { - // Test units of tenancy get defaulted correctly when empty. - ctx := context.Background() - for desc, tc := range wildcardTenancyCases() { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) - - // Write partition scoped record label - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - recordLabelRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - - // Write namespace scoped artist - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - artistRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - - // Write a cluster scoped Executive - executive, err := demo.GenerateV1Executive("king-arthur", "CEO") - require.NoError(t, err) - executiveRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: executive}) - require.NoError(t, err) - - // List and verify correct resource returned for empty tenancy units. - listRsp, err := client.List(ctx, &pbresource.ListRequest{ - Type: tc.typ, - Tenancy: tc.tenancy, - }) - require.NoError(t, err) - require.Len(t, listRsp.Resources, 1) - switch tc.typ { - case demo.TypeV1RecordLabel: - prototest.AssertDeepEqual(t, recordLabelRsp.Resource, listRsp.Resources[0]) - case demo.TypeV1Artist: - prototest.AssertDeepEqual(t, artistRsp.Resource, listRsp.Resources[0]) - case demo.TypeV1Executive: - prototest.AssertDeepEqual(t, executiveRsp.Resource, listRsp.Resources[0]) - } - }) - } -} - func TestList_GroupVersionMismatch(t *testing.T) { for desc, tc := range listTestCases() { t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - _, err = client.Write(tc.ctx, &pbresource.WriteRequest{Resource: artist}) + _, err = server.Backend.WriteCAS(tc.ctx, artist) require.NoError(t, err) rsp, err := client.List(tc.ctx, &pbresource.ListRequest{ @@ -273,7 +139,7 @@ func TestList_VerifyReadConsistencyArg(t *testing.T) { // Uses a mockBackend instead of the inmem Backend to verify the ReadConsistency argument is set correctly. for desc, tc := range listTestCases() { t.Run(desc, func(t *testing.T) { - mockBackend := svc.NewMockBackend(t) + mockBackend := NewMockBackend(t) server := testServer(t) server.Backend = mockBackend demo.RegisterTypes(server.Registry) @@ -334,24 +200,25 @@ func TestList_ACL_ListAllowed_ReadAllowed(t *testing.T) { prototest.AssertDeepEqual(t, artist, rsp.Resources[0]) } +// roundtrip a List which attempts to return a single resource func roundTripList(t *testing.T, authz acl.Authorizer) (*pbresource.Resource, *pbresource.ListResponse, error) { + server := testServer(t) + client := testClient(t, server) ctx := testContext(t) - builder := svctest.NewResourceServiceBuilder().WithRegisterFns(demo.RegisterTypes) - client := builder.Run(t) + + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(authz, nil) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - rsp1, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) + artist, err = server.Backend.WriteCAS(ctx, artist) require.NoError(t, err) - // Put ACLResolver in place after above writes so writes not subject to ACLs - mockACLResolver := &svc.MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). 
- Return(authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver - - rsp2, err := client.List( + rsp, err := client.List( ctx, &pbresource.ListRequest{ Type: artist.Id.Type, @@ -359,7 +226,8 @@ func roundTripList(t *testing.T, authz acl.Authorizer) (*pbresource.Resource, *p NamePrefix: "", }, ) - return rsp1.Resource, rsp2, err + + return artist, rsp, err } type listTestCase struct { diff --git a/agent/grpc-external/services/resource/mock_Registry.go b/agent/grpc-external/services/resource/mock_Registry.go index c97b5428a3034..288e8bcde8b67 100644 --- a/agent/grpc-external/services/resource/mock_Registry.go +++ b/agent/grpc-external/services/resource/mock_Registry.go @@ -43,22 +43,6 @@ func (_m *MockRegistry) Resolve(typ *pbresource.Type) (internalresource.Registra return r0, r1 } -// Types provides a mock function with given fields: -func (_m *MockRegistry) Types() []internalresource.Registration { - ret := _m.Called() - - var r0 []internalresource.Registration - if rf, ok := ret.Get(0).(func() []internalresource.Registration); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]internalresource.Registration) - } - } - - return r0 -} - type mockConstructorTestingTNewMockRegistry interface { mock.TestingT Cleanup(func()) diff --git a/agent/grpc-external/services/resource/mock_TenancyBridge.go b/agent/grpc-external/services/resource/mock_TenancyBridge.go deleted file mode 100644 index 662b4004b99f7..0000000000000 --- a/agent/grpc-external/services/resource/mock_TenancyBridge.go +++ /dev/null @@ -1,121 +0,0 @@ -// Code generated by mockery v2.20.0. DO NOT EDIT. - -package resource - -import mock "github.com/stretchr/testify/mock" - -// MockTenancyBridge is an autogenerated mock type for the TenancyBridge type -type MockTenancyBridge struct { - mock.Mock -} - -// IsNamespaceMarkedForDeletion provides a mock function with given fields: partition, namespace -func (_m *MockTenancyBridge) IsNamespaceMarkedForDeletion(partition string, namespace string) (bool, error) { - ret := _m.Called(partition, namespace) - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (bool, error)); ok { - return rf(partition, namespace) - } - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(partition, namespace) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(partition, namespace) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsPartitionMarkedForDeletion provides a mock function with given fields: partition -func (_m *MockTenancyBridge) IsPartitionMarkedForDeletion(partition string) (bool, error) { - ret := _m.Called(partition) - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { - return rf(partition) - } - if rf, ok := ret.Get(0).(func(string) bool); ok { - r0 = rf(partition) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(partition) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NamespaceExists provides a mock function with given fields: partition, namespace -func (_m *MockTenancyBridge) NamespaceExists(partition string, namespace string) (bool, error) { - ret := _m.Called(partition, namespace) - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (bool, error)); ok { - return rf(partition, namespace) - } - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(partition, namespace) - } else { - r0 = 
ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(partition, namespace) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PartitionExists provides a mock function with given fields: partition -func (_m *MockTenancyBridge) PartitionExists(partition string) (bool, error) { - ret := _m.Called(partition) - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { - return rf(partition) - } - if rf, ok := ret.Get(0).(func(string) bool); ok { - r0 = rf(partition) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(partition) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewMockTenancyBridge interface { - mock.TestingT - Cleanup(func()) -} - -// NewMockTenancyBridge creates a new instance of MockTenancyBridge. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMockTenancyBridge(t mockConstructorTestingTNewMockTenancyBridge) *MockTenancyBridge { - mock := &MockTenancyBridge{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/grpc-external/services/resource/mutate_and_validate.go b/agent/grpc-external/services/resource/mutate_and_validate.go deleted file mode 100644 index 7aa3519f38485..0000000000000 --- a/agent/grpc-external/services/resource/mutate_and_validate.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package resource - -import ( - "context" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -func (s *Server) MutateAndValidate(ctx context.Context, req *pbresource.MutateAndValidateRequest) (*pbresource.MutateAndValidateResponse, error) { - tenancyMarkedForDeletion, err := s.mutateAndValidate(ctx, req.Resource, false) - if err != nil { - return nil, err - } - - if tenancyMarkedForDeletion { - return nil, status.Errorf( - codes.InvalidArgument, - "tenancy marked for deletion: %s/%s", - req.Resource.Id.Tenancy.Partition, - req.Resource.Id.Tenancy.Namespace, - ) - } - return &pbresource.MutateAndValidateResponse{Resource: req.Resource}, nil -} - -// private DRY impl that is used by both the Write and MutateAndValidate RPCs. -func (s *Server) mutateAndValidate(ctx context.Context, res *pbresource.Resource, enforceLicenseCheck bool) (tenancyMarkedForDeletion bool, err error) { - reg, err := s.ensureResourceValid(res, enforceLicenseCheck) - if err != nil { - return false, err - } - - v1EntMeta := v2TenancyToV1EntMeta(res.Id.Tenancy) - authz, authzContext, err := s.getAuthorizer(tokenFromContext(ctx), v1EntMeta) - if err != nil { - return false, err - } - v1EntMetaToV2Tenancy(reg, v1EntMeta, res.Id.Tenancy) - - // Check the user sent the correct type of data. 
- if res.Data != nil && !res.Data.MessageIs(reg.Proto) { - got := strings.TrimPrefix(res.Data.TypeUrl, "type.googleapis.com/") - - return false, status.Errorf( - codes.InvalidArgument, - "resource.data is of wrong type (expected=%q, got=%q)", - reg.Proto.ProtoReflect().Descriptor().FullName(), - got, - ) - } - - if err = reg.Mutate(res); err != nil { - return false, status.Errorf(codes.Internal, "failed mutate hook: %v", err.Error()) - } - - if err = reg.Validate(res); err != nil { - return false, status.Error(codes.InvalidArgument, err.Error()) - } - - // ACL check comes before tenancy existence checks to not leak tenancy "existence". - err = reg.ACLs.Write(authz, authzContext, res) - switch { - case acl.IsErrPermissionDenied(err): - return false, status.Error(codes.PermissionDenied, err.Error()) - case err != nil: - return false, status.Errorf(codes.Internal, "failed write acl: %v", err) - } - - // Check tenancy exists for the V2 resource - if err = tenancyExists(reg, s.TenancyBridge, res.Id.Tenancy, codes.InvalidArgument); err != nil { - return false, err - } - - // This is used later in the "create" and "update" paths to block non-delete related writes - // when a tenancy unit has been marked for deletion. - tenancyMarkedForDeletion, err = isTenancyMarkedForDeletion(reg, s.TenancyBridge, res.Id.Tenancy) - if err != nil { - return false, status.Errorf(codes.Internal, "failed tenancy marked for deletion check: %v", err) - } - if tenancyMarkedForDeletion { - return true, nil - } - return false, nil -} - -func (s *Server) ensureResourceValid(res *pbresource.Resource, enforceLicenseCheck bool) (*resource.Registration, error) { - var field string - switch { - case res == nil: - field = "resource" - case res.Id == nil: - field = "resource.id" - } - - if field != "" { - return nil, status.Errorf(codes.InvalidArgument, "%s is required", field) - } - - if err := validateId(res.Id, "resource.id"); err != nil { - return nil, err - } - - if res.Owner != nil { - if err := validateId(res.Owner, "resource.owner"); err != nil { - return nil, err - } - } - - // Check type exists. - reg, err := s.resolveType(res.Id.Type) - if err != nil { - return nil, err - } - - // Since this is shared by Write and MutateAndValidate, only fail the operation - // if it's a write operation and the feature is not allowed by the license. - if err = s.FeatureCheck(reg); err != nil && enforceLicenseCheck { - return nil, err - } - - if err = checkV2Tenancy(s.UseV2Tenancy, res.Id.Type); err != nil { - return nil, err - } - - // Check scope - if reg.Scope == resource.ScopePartition && res.Id.Tenancy.Namespace != "" { - return nil, status.Errorf( - codes.InvalidArgument, - "partition scoped resource %s cannot have a namespace. got: %s", - resource.ToGVK(res.Id.Type), - res.Id.Tenancy.Namespace, - ) - } - - return reg, nil -} diff --git a/agent/grpc-external/services/resource/mutate_and_validate_test.go b/agent/grpc-external/services/resource/mutate_and_validate_test.go deleted file mode 100644 index 8f163e778c790..0000000000000 --- a/agent/grpc-external/services/resource/mutate_and_validate_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package resource_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - "github.com/hashicorp/consul/internal/resource/demo" - "github.com/hashicorp/consul/proto-public/pbresource" - pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2" - "github.com/hashicorp/consul/proto/private/prototest" -) - -func TestMutateAndValidate_InputValidation(t *testing.T) { - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc resourceValidTestCase) { - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - req := &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel)} - _, err = client.MutateAndValidate(testContext(t), req) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) - } - - for _, v2tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithV2Tenancy(v2tenancy). - Run(t) - - for desc, tc := range resourceValidTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) - }) - } - }) - } -} - -func TestMutateAndValidate_OwnerValidation(t *testing.T) { - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc ownerValidTestCase) { - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - album, err := demo.GenerateV2Album(artist.Id) - require.NoError(t, err) - - tc.modFn(album) - - _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: album}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errorContains) - } - - for _, v2tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithV2Tenancy(v2tenancy). 
- Run(t) - - for desc, tc := range ownerValidationTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) - }) - } - }) - } -} - -func TestMutateAndValidate_TypeNotFound(t *testing.T) { - run := func(t *testing.T, client pbresource.ResourceServiceClient) { - res, err := demo.GenerateV2Artist() - require.NoError(t, err) - - _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: res}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") - } - - for _, v2tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder().WithV2Tenancy(v2tenancy).Run(t) - run(t, client) - }) - } -} - -func TestMutateAndValidate_Success(t *testing.T) { - run := func(t *testing.T, client pbresource.ResourceServiceClient, tc mavOrWriteSuccessTestCase) { - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - rsp, err := client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel)}) - require.NoError(t, err) - prototest.AssertDeepEqual(t, tc.expectedTenancy, rsp.Resource.Id.Tenancy) - } - - for _, v2tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithV2Tenancy(v2tenancy). - Run(t) - - for desc, tc := range mavOrWriteSuccessTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) - }) - } - }) - } -} - -func TestMutateAndValidate_Mutate(t *testing.T) { - for _, v2tenancy := range []bool{false, true} { - t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithV2Tenancy(v2tenancy). - Run(t) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - artistData := &pbdemov2.Artist{} - artist.Data.UnmarshalTo(artistData) - require.NoError(t, err) - - // mutate hook sets genre to disco when unspecified - artistData.Genre = pbdemov2.Genre_GENRE_UNSPECIFIED - artist.Data.MarshalFrom(artistData) - require.NoError(t, err) - - rsp, err := client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: artist}) - require.NoError(t, err) - - // verify mutate hook set genre to disco - require.NoError(t, rsp.Resource.Data.UnmarshalTo(artistData)) - require.Equal(t, pbdemov2.Genre_GENRE_DISCO, artistData.Genre) - }) - } -} - -func TestMutateAndValidate_Tenancy_NotFound(t *testing.T) { - for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel)}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), tc.errContains) - }) - } -} - -func TestMutateAndValidate_TenancyMarkedForDeletion_Fails(t *testing.T) { - for desc, tc := range mavOrWriteTenancyMarkedForDeletionTestCases(t) { - t.Run(desc, func(t *testing.T) { - server := testServer(t) - client := testClient(t, server) - demo.RegisterTypes(server.Registry) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - recordLabel.Id.Tenancy.Partition = "ap1" - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - artist.Id.Tenancy.Partition = "ap1" - artist.Id.Tenancy.Namespace = "ns1" - - mockTenancyBridge := &svc.MockTenancyBridge{} - mockTenancyBridge.On("PartitionExists", "ap1").Return(true, nil) - mockTenancyBridge.On("NamespaceExists", "ap1", "ns1").Return(true, nil) - server.TenancyBridge = mockTenancyBridge - - _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel, mockTenancyBridge)}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), tc.errContains) - }) - } -} diff --git a/agent/grpc-external/services/resource/read.go b/agent/grpc-external/services/resource/read.go index 48d07a337569a..c75779183cb8d 100644 --- a/agent/grpc-external/services/resource/read.go +++ b/agent/grpc-external/services/resource/read.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -11,109 +11,55 @@ import ( "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" ) func (s *Server) Read(ctx context.Context, req *pbresource.ReadRequest) (*pbresource.ReadResponse, error) { - // Light first pass validation based on what user passed in and not much more. - reg, err := s.ensureReadRequestValid(req) - if err != nil { + if err := validateReadRequest(req); err != nil { return nil, err } - // acl.EnterpriseMeta acl.AuthorizerContext follow rules for V1 resources since they integrate with the V1 acl subsystem. - // pbresource.Tenancy follows rules for V2 resources and the Resource service. - // Example: - // - // A CE namespace scoped resource: - // V1: EnterpriseMeta{} - // V2: Tenancy {Partition: "default", Namespace: "default"} - // - // An ENT namespace scoped resource: - // V1: EnterpriseMeta{Partition: "default", Namespace: "default"} - // V2: Tenancy {Partition: "default", Namespace: "default"} - // - // It is necessary to convert back and forth depending on which component supports which version, V1 or V2. 
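As a rough sketch of that round trip in CE, using the v2TenancyToV1EntMeta and v1EntMetaToV2Tenancy helpers that this patch removes from server_ce.go further down (reg is assumed to be the registration of a namespace-scoped type):

    tenancy := &pbresource.Tenancy{}            // caller omitted partition/namespace
    entMeta := v2TenancyToV1EntMeta(tenancy)    // CE: always acl.DefaultEnterpriseMeta()
    v1EntMetaToV2Tenancy(reg, entMeta, tenancy) // fills in {Partition: "default", Namespace: "default"}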
- entMeta := v2TenancyToV1EntMeta(req.Id.Tenancy) - authz, authzContext, err := s.getAuthorizer(tokenFromContext(ctx), entMeta) + // check type exists + reg, err := s.resolveType(req.Id.Type) if err != nil { return nil, err } - v1EntMetaToV2Tenancy(reg, entMeta, req.Id.Tenancy) + authz, err := s.getAuthorizer(tokenFromContext(ctx)) + if err != nil { + return nil, err + } - // ACL check usually comes before tenancy existence checks to not leak - // tenancy "existence", unless the ACL check requires the data payload - // to function. - authzNeedsData := false - err = reg.ACLs.Read(authz, authzContext, req.Id, nil) + // check acls + err = reg.ACLs.Read(authz, req.Id) switch { - case errors.Is(err, resource.ErrNeedResource): - authzNeedsData = true - err = nil case acl.IsErrPermissionDenied(err): return nil, status.Error(codes.PermissionDenied, err.Error()) case err != nil: return nil, status.Errorf(codes.Internal, "failed read acl: %v", err) } - // Check tenancy exists for the V2 resource. - if err = tenancyExists(reg, s.TenancyBridge, req.Id.Tenancy, codes.NotFound); err != nil { - return nil, err - } - resource, err := s.Backend.Read(ctx, readConsistencyFrom(ctx), req.Id) switch { + case err == nil: + return &pbresource.ReadResponse{Resource: resource}, nil case errors.Is(err, storage.ErrNotFound): return nil, status.Error(codes.NotFound, err.Error()) case errors.As(err, &storage.GroupVersionMismatchError{}): return nil, status.Error(codes.InvalidArgument, err.Error()) - case err != nil: + default: return nil, status.Errorf(codes.Internal, "failed read: %v", err) } - - if authzNeedsData { - err = reg.ACLs.Read(authz, authzContext, req.Id, resource) - switch { - case acl.IsErrPermissionDenied(err): - return nil, status.Error(codes.PermissionDenied, err.Error()) - case err != nil: - return nil, status.Errorf(codes.Internal, "failed read acl: %v", err) - } - } - - return &pbresource.ReadResponse{Resource: resource}, nil } -func (s *Server) ensureReadRequestValid(req *pbresource.ReadRequest) (*resource.Registration, error) { +func validateReadRequest(req *pbresource.ReadRequest) error { if req.Id == nil { - return nil, status.Errorf(codes.InvalidArgument, "id is required") + return status.Errorf(codes.InvalidArgument, "id is required") } if err := validateId(req.Id, "id"); err != nil { - return nil, err + return err } - - // Check type exists. - reg, err := s.resolveType(req.Id.Type) - if err != nil { - return nil, err - } - - // Ignore return value since read ops are allowed but will log a warning if the feature is - // not enabled in the license. - _ = s.FeatureCheck(reg) - - if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil { - return nil, err - } - - // Check scope - if err = validateScopedTenancy(reg.Scope, req.Id.Type, req.Id.Tenancy, false); err != nil { - return nil, err - } - - return reg, nil + return nil } diff --git a/agent/grpc-external/services/resource/read_test.go b/agent/grpc-external/services/resource/read_test.go index b7367e6390319..cca911ec15b5b 100644 --- a/agent/grpc-external/services/resource/read_test.go +++ b/agent/grpc-external/services/resource/read_test.go @@ -1,13 +1,10 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" - "fmt" - "strings" - "sync" "testing" "github.com/stretchr/testify/mock" @@ -15,142 +12,57 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - "github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" - "github.com/hashicorp/consul/sdk/testutil" ) -// TODO: Update all tests to use true/false table test for v2tenancy - func TestRead_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) - type testCase struct { - modFn func(artistId, recordlabelId, executiveId *pbresource.ID) *pbresource.ID - errContains string - } + demo.RegisterTypes(server.Registry) - testCases := map[string]testCase{ - "no id": { - modFn: func(_, _, _ *pbresource.ID) *pbresource.ID { - return nil - }, - errContains: "id is required", - }, - "no type": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Type = nil - return artistId - }, - errContains: "id.type is required", - }, - "no name": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "" - return artistId - }, - errContains: "id.name invalid", - }, - "name is mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "MixedCaseNotAllowed" - return artistId - }, - errContains: "id.name invalid", - }, - "name too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Name = strings.Repeat("a", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.name invalid", - }, - "partition is mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = "Default" - return artistId - }, - errContains: "id.tenancy.partition invalid", + testCases := map[string]func(*pbresource.ReadRequest){ + "no id": func(req *pbresource.ReadRequest) { req.Id = nil }, + "no type": func(req *pbresource.ReadRequest) { req.Id.Type = nil }, + "no tenancy": func(req *pbresource.ReadRequest) { req.Id.Tenancy = nil }, + "no name": func(req *pbresource.ReadRequest) { req.Id.Name = "" }, + // clone necessary to not pollute DefaultTenancy + "tenancy partition not default": func(req *pbresource.ReadRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.Partition = "" }, - "partition too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.tenancy.partition invalid", + "tenancy namespace not default": func(req *pbresource.ReadRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.Namespace = "" }, - "namespace is mixed case": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = 
"Default" - return artistId - }, - errContains: "id.tenancy.namespace invalid", - }, - "namespace too long": { - modFn: func(artistId, _, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - return artistId - }, - errContains: "id.tenancy.namespace invalid", - }, - "partition scope with non-empty namespace": { - modFn: func(_, recordLabelId, _ *pbresource.ID) *pbresource.ID { - recordLabelId.Tenancy.Namespace = "ishouldnothaveanamespace" - return recordLabelId - }, - errContains: "cannot have a namespace", - }, - "cluster scope with non-empty partition": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy = &pbresource.Tenancy{Partition: resource.DefaultPartitionName} - return executiveId - }, - errContains: "cannot have a partition", - }, - "cluster scope with non-empty namespace": { - modFn: func(_, _, executiveId *pbresource.ID) *pbresource.ID { - executiveId.Tenancy = &pbresource.Tenancy{Namespace: resource.DefaultNamespaceName} - return executiveId - }, - errContains: "cannot have a namespace", + "tenancy peername not local": func(req *pbresource.ReadRequest) { + req.Id.Tenancy = clone(req.Id.Tenancy) + req.Id.Tenancy.PeerName = "" }, } - for desc, tc := range testCases { + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - executive, err := demo.GenerateV1Executive("music-man", "CEO") + res, err := demo.GenerateV2Artist() require.NoError(t, err) - // Each test case picks which resource to use based on the resource type's scope. - req := &pbresource.ReadRequest{Id: tc.modFn(artist.Id, recordLabel.Id, executive.Id)} + req := &pbresource.ReadRequest{Id: res.Id} + modFn(req) _, err = client.Read(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } func TestRead_TypeNotFound(t *testing.T) { - server := svc.NewServer(svc.Config{Registry: resource.NewRegistry()}) + server := NewServer(Config{Registry: resource.NewRegistry()}) client := testClient(t, server) artist, err := demo.GenerateV2Artist() @@ -165,67 +77,18 @@ func TestRead_TypeNotFound(t *testing.T) { func TestRead_ResourceNotFound(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { - type tenancyCase struct { - modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID - errContains string - } - tenancyCases := map[string]tenancyCase{ - "resource not found by name": { - modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Name = "bogusname" - return artistId - }, - errContains: "resource not found", - }, - "partition not found when namespace scoped": { - modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = "boguspartition" - return id - }, - errContains: "partition not found", - }, - "namespace not found when namespace scoped": { - modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Namespace = "bogusnamespace" - return id - }, - errContains: "namespace not found", - }, - "partition not found when partition scoped": { - modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy.Partition = "boguspartition" - return id - }, - errContains: 
"partition not found", - }, - } - for tenancyDesc, tenancyCase := range tenancyCases { - t.Run(tenancyDesc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - - // Each tenancy test case picks which resource to use based on the resource type's scope. - _, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: tenancyCase.modFn(artist.Id, recordLabel.Id)}) - require.Error(t, err) - require.Equal(t, codes.NotFound.String(), status.Code(err).String()) - require.ErrorContains(t, err, tenancyCase.errContains) - }) - } + server := testServer(t) + + demo.RegisterTypes(server.Registry) + client := testClient(t, server) + + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + + _, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: artist.Id}) + require.Error(t, err) + require.Equal(t, codes.NotFound.String(), status.Code(err).String()) + require.Contains(t, err.Error(), "resource not found") }) } } @@ -233,14 +96,15 @@ func TestRead_ResourceNotFound(t *testing.T) { func TestRead_GroupVersionMismatch(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + + demo.RegisterTypes(server.Registry) + client := testClient(t, server) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - _, err = client.Write(tc.ctx, &pbresource.WriteRequest{Resource: artist}) + _, err = server.Backend.WriteCAS(tc.ctx, artist) require.NoError(t, err) id := clone(artist.Id) @@ -257,39 +121,20 @@ func TestRead_GroupVersionMismatch(t *testing.T) { func TestRead_Success(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { - for tenancyDesc, modFn := range tenancyCases() { - t.Run(tenancyDesc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - rsp1, err := client.Write(tc.ctx, &pbresource.WriteRequest{Resource: recordLabel}) - recordLabel = rsp1.Resource - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - rsp2, err := client.Write(tc.ctx, &pbresource.WriteRequest{Resource: artist}) - artist = rsp2.Resource - require.NoError(t, err) - - // Each tenancy test case picks which resource to use based on the resource type's scope. 
- req := &pbresource.ReadRequest{Id: modFn(artist.Id, recordLabel.Id)} - rsp, err := client.Read(tc.ctx, req) - require.NoError(t, err) - - switch { - case proto.Equal(rsp.Resource.Id.Type, demo.TypeV2Artist): - prototest.AssertDeepEqual(t, artist, rsp.Resource) - case proto.Equal(rsp.Resource.Id.Type, demo.TypeV1RecordLabel): - prototest.AssertDeepEqual(t, recordLabel, rsp.Resource) - default: - require.Fail(t, "unexpected resource type") - } - }) - } + server := testServer(t) + + demo.RegisterTypes(server.Registry) + client := testClient(t, server) + + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + + resource1, err := server.Backend.WriteCAS(tc.ctx, artist) + require.NoError(t, err) + + rsp, err := client.Read(tc.ctx, &pbresource.ReadRequest{Id: artist.Id}) + require.NoError(t, err) + prototest.AssertDeepEqual(t, resource1, rsp.Resource) }) } } @@ -299,7 +144,7 @@ func TestRead_VerifyReadConsistencyArg(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { server := testServer(t) - mockBackend := svc.NewMockBackend(t) + mockBackend := NewMockBackend(t) server.Backend = mockBackend demo.RegisterTypes(server.Registry) @@ -320,111 +165,40 @@ func TestRead_VerifyReadConsistencyArg(t *testing.T) { // N.B. Uses key ACLs for now. See demo.RegisterTypes() func TestRead_ACLs(t *testing.T) { type testCase struct { - res *pbresource.Resource - authz resolver.Result - codeNotExist codes.Code - codeExists codes.Code + authz resolver.Result + code codes.Code } - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - label, err := demo.GenerateV1RecordLabel("blink1982") - require.NoError(t, err) - testcases := map[string]testCase{ - "artist-v1/read hook denied": { - res: artist, - authz: AuthorizerFrom(t, demo.ArtistV1ReadPolicy), - codeNotExist: codes.PermissionDenied, - codeExists: codes.PermissionDenied, + "read hook denied": { + authz: AuthorizerFrom(t, demo.ArtistV1ReadPolicy), + code: codes.PermissionDenied, }, - "artist-v2/read hook allowed": { - res: artist, - authz: AuthorizerFrom(t, demo.ArtistV2ReadPolicy), - codeNotExist: codes.NotFound, - codeExists: codes.OK, - }, - // Labels have the read ACL that requires reading the data. - "label-v1/read hook denied": { - res: label, - authz: AuthorizerFrom(t, demo.LabelV1ReadPolicy), - codeNotExist: codes.NotFound, - codeExists: codes.PermissionDenied, + "read hook allowed": { + authz: AuthorizerFrom(t, demo.ArtistV2ReadPolicy), + code: codes.NotFound, }, } - adminAuthz := AuthorizerFrom(t, `key_prefix "" { policy = "write" }`) - - idx := 0 - nextTokenContext := func(t *testing.T) context.Context { - // Each query should use a distinct token string to avoid caching so we can - // change the behavior each call. - token := fmt.Sprintf("token-%d", idx) - idx++ - //nolint:staticcheck - return context.WithValue(testContext(t), "x-consul-token", token) - } - for desc, tc := range testcases { t.Run(desc, func(t *testing.T) { - dr := &dummyACLResolver{ - result: testutils.ACLsDisabled(t), - } - - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithACLResolver(dr). - Run(t) - - dr.SetResult(tc.authz) - testutil.RunStep(t, "does not exist", func(t *testing.T) { - _, err = client.Read(nextTokenContext(t), &pbresource.ReadRequest{Id: tc.res.Id}) - if tc.codeNotExist == codes.OK { - require.NoError(t, err) - } else { - require.Error(t, err) - } - require.Equal(t, tc.codeNotExist.String(), status.Code(err).String(), "%v", err) - }) - - // Create it. 
- dr.SetResult(adminAuthz) - _, err = client.Write(nextTokenContext(t), &pbresource.WriteRequest{Resource: tc.res}) - require.NoError(t, err, "could not write resource") - - dr.SetResult(tc.authz) - testutil.RunStep(t, "does exist", func(t *testing.T) { - // exercise ACL when the data does exist - _, err = client.Read(nextTokenContext(t), &pbresource.ReadRequest{Id: tc.res.Id}) - if tc.codeExists == codes.OK { - require.NoError(t, err) - } else { - require.Error(t, err) - } - require.Equal(t, tc.codeExists.String(), status.Code(err).String()) - }) - }) - } -} - -type dummyACLResolver struct { - lock sync.Mutex - result resolver.Result -} + server := testServer(t) + client := testClient(t, server) -var _ svc.ACLResolver = (*dummyACLResolver)(nil) + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(tc.authz, nil) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) -func (r *dummyACLResolver) SetResult(result resolver.Result) { - r.lock.Lock() - defer r.lock.Unlock() - r.result = result -} + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) -func (r *dummyACLResolver) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (resolver.Result, error) { - r.lock.Lock() - defer r.lock.Unlock() - return r.result, nil + // exercise ACL + _, err = client.Read(testContext(t), &pbresource.ReadRequest{Id: artist.Id}) + require.Error(t, err) + require.Equal(t, tc.code.String(), status.Code(err).String()) + }) + } } type readTestCase struct { diff --git a/agent/grpc-external/services/resource/server.go b/agent/grpc-external/services/resource/server.go index 21ad4bd45d2b7..51bb4610d527f 100644 --- a/agent/grpc-external/services/resource/server.go +++ b/agent/grpc-external/services/resource/server.go @@ -1,14 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource import ( "context" - "errors" - "strings" - "time" + "github.com/hashicorp/go-hclog" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -19,7 +17,6 @@ import ( "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" - "github.com/hashicorp/consul/lib/retry" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -27,6 +24,15 @@ type Server struct { Config } +type Config struct { + Logger hclog.Logger + Registry Registry + + // Backend is the storage backend that will be used for resource persistence. 
+ Backend Backend + ACLResolver ACLResolver +} + //go:generate mockery --name Registry --inpackage type Registry interface { resource.Registry @@ -42,25 +48,17 @@ type ACLResolver interface { ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (resolver.Result, error) } -//go:generate mockery --name TenancyBridge --inpackage -type TenancyBridge interface { - PartitionExists(partition string) (bool, error) - IsPartitionMarkedForDeletion(partition string) (bool, error) - NamespaceExists(partition, namespace string) (bool, error) - IsNamespaceMarkedForDeletion(partition, namespace string) (bool, error) -} - func NewServer(cfg Config) *Server { return &Server{cfg} } var _ pbresource.ResourceServiceServer = (*Server)(nil) -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbresource.RegisterResourceServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbresource.RegisterResourceServiceServer(grpcServer, s) } -// Get token from grpc metadata or AnonymousTokenId if not found +// Get token from grpc metadata or AnonymounsTokenId if not found func tokenFromContext(ctx context.Context) string { md, ok := metadata.FromIncomingContext(ctx) if !ok { @@ -102,13 +100,12 @@ func readConsistencyFrom(ctx context.Context) storage.ReadConsistency { return storage.EventualConsistency } -func (s *Server) getAuthorizer(token string, entMeta *acl.EnterpriseMeta) (acl.Authorizer, *acl.AuthorizerContext, error) { - authzContext := &acl.AuthorizerContext{} - authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, authzContext) +func (s *Server) getAuthorizer(token string) (acl.Authorizer, error) { + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, nil, nil) if err != nil { - return nil, nil, status.Errorf(codes.Internal, "failed getting authorizer: %v", err) + return nil, status.Errorf(codes.Internal, "failed getting authorizer: %v", err) } - return authz, authzContext, nil + return authz, nil } func isGRPCStatusError(err error) bool { @@ -120,189 +117,35 @@ func isGRPCStatusError(err error) bool { } func validateId(id *pbresource.ID, errorPrefix string) error { - if id.Type == nil { - return status.Errorf(codes.InvalidArgument, "%s.type is required", errorPrefix) + var field string + switch { + case id.Type == nil: + field = "type" + case id.Tenancy == nil: + field = "tenancy" + case id.Name == "": + field = "name" } - if err := resource.ValidateName(id.Name); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.name invalid: %v", errorPrefix, err) + if field != "" { + return status.Errorf(codes.InvalidArgument, "%s.%s is required", errorPrefix, field) } - // Better UX: Allow callers to pass in nil tenancy. Defaulting and inheritance of tenancy - // from the request token will take place further down in the call flow. 
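By contrast, the restored checks a few lines below require the caller to spell the default tenancy out explicitly rather than defaulting a nil tenancy. A minimal sketch of an ID they accept (the type and name are illustrative only):

    id := &pbresource.ID{
        Type: demo.TypeV2Artist,
        Name: "blur", // illustrative
        Tenancy: &pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
            PeerName:  "local",
        },
    }
    // validateId(id, "id") returns nil; any other partition, namespace, or peer name
    // is rejected with codes.InvalidArgument, e.g. "id.tenancy.partition must be default".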
- if id.Tenancy == nil { - id.Tenancy = &pbresource.Tenancy{ - Partition: "", - Namespace: "", - } + // Revisit defaulting and non-namespaced resources post-1.16 + var expected string + switch { + case id.Tenancy.Partition != "default": + field, expected = "partition", "default" + case id.Tenancy.Namespace != "default": + field, expected = "namespace", "default" + case id.Tenancy.PeerName != "local": + field, expected = "peername", "local" } - if id.Tenancy.Partition != "" { - if err := resource.ValidateName(id.Tenancy.Partition); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.tenancy.partition invalid: %v", errorPrefix, err) - } - } - if id.Tenancy.Namespace != "" { - if err := resource.ValidateName(id.Tenancy.Namespace); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.tenancy.namespace invalid: %v", errorPrefix, err) - } + if field != "" { + return status.Errorf(codes.InvalidArgument, "%s.tenancy.%s must be %s", errorPrefix, field, expected) } - return nil } -func validateRef(ref *pbresource.Reference, errorPrefix string) error { - if ref.Type == nil { - return status.Errorf(codes.InvalidArgument, "%s.type is required", errorPrefix) - } - if err := resource.ValidateName(ref.Name); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.name invalid: %v", errorPrefix, err) - } - if err := resource.ValidateName(ref.Tenancy.Partition); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.tenancy.partition invalid: %v", errorPrefix, err) - } - if err := resource.ValidateName(ref.Tenancy.Namespace); err != nil { - return status.Errorf(codes.InvalidArgument, "%s.tenancy.namespace invalid: %v", errorPrefix, err) - } - return nil -} - -func validateWildcardTenancy(tenancy *pbresource.Tenancy, namePrefix string) error { - // Partition has to be a valid name if not wildcard or empty - if tenancy.Partition != "" && tenancy.Partition != "*" { - if err := resource.ValidateName(tenancy.Partition); err != nil { - return status.Errorf(codes.InvalidArgument, "tenancy.partition invalid: %v", err) - } - } - - // Namespace has to be a valid name if not wildcard or empty - if tenancy.Namespace != "" && tenancy.Namespace != "*" { - if err := resource.ValidateName(tenancy.Namespace); err != nil { - return status.Errorf(codes.InvalidArgument, "tenancy.namespace invalid: %v", err) - } - } - - // Not doing a strict resource name validation here because the prefix can be - // something like "foo-" which is a valid prefix but not valid resource name. - // relax validation to just check for lowercasing - if namePrefix != strings.ToLower(namePrefix) { - return status.Errorf(codes.InvalidArgument, "name_prefix invalid: must be lowercase alphanumeric, got: %v", namePrefix) - } - - return nil -} - -// tenancyExists return an error with the passed in gRPC status code when tenancy partition or namespace do not exist. 
-func tenancyExists(reg *resource.Registration, tenancyBridge TenancyBridge, tenancy *pbresource.Tenancy, errCode codes.Code) error { - if reg.Scope == resource.ScopePartition || reg.Scope == resource.ScopeNamespace { - exists, err := tenancyBridge.PartitionExists(tenancy.Partition) - switch { - case err != nil: - return err - case !exists: - return status.Errorf(errCode, "partition not found: %v", tenancy.Partition) - } - } - - if reg.Scope == resource.ScopeNamespace { - exists, err := tenancyBridge.NamespaceExists(tenancy.Partition, tenancy.Namespace) - switch { - case err != nil: - return err - case !exists: - return status.Errorf(errCode, "namespace not found: %v", tenancy.Namespace) - } - } - return nil -} - -func validateScopedTenancy(scope resource.Scope, resourceType *pbresource.Type, tenancy *pbresource.Tenancy, allowWildcards bool) error { - if scope == resource.ScopePartition && tenancy.Namespace != "" && (!allowWildcards || tenancy.Namespace != storage.Wildcard) { - return status.Errorf( - codes.InvalidArgument, - "partition scoped resource %s cannot have a namespace. got: %s", - resource.ToGVK(resourceType), - tenancy.Namespace, - ) - } - - if scope == resource.ScopeCluster { - if tenancy.Partition != "" && (!allowWildcards || tenancy.Partition != storage.Wildcard) { - return status.Errorf( - codes.InvalidArgument, - "cluster scoped resource %s cannot have a partition: %s", - resource.ToGVK(resourceType), - tenancy.Partition, - ) - } - if tenancy.Namespace != "" && (!allowWildcards || tenancy.Namespace != storage.Wildcard) { - return status.Errorf( - codes.InvalidArgument, - "cluster scoped resource %s cannot have a namespace: %s", - resource.ToGVK(resourceType), - tenancy.Namespace, - ) - } - } - return nil -} - -func isTenancyMarkedForDeletion(reg *resource.Registration, tenancyBridge TenancyBridge, tenancy *pbresource.Tenancy) (bool, error) { - if reg.Scope == resource.ScopePartition || reg.Scope == resource.ScopeNamespace { - marked, err := tenancyBridge.IsPartitionMarkedForDeletion(tenancy.Partition) - if err != nil { - return false, err - } - if marked { - return marked, nil - } - } - - if reg.Scope == resource.ScopeNamespace { - marked, err := tenancyBridge.IsNamespaceMarkedForDeletion(tenancy.Partition, tenancy.Namespace) - if err != nil { - return false, err - } - return marked, nil - } - - // Cluster scope has no tenancy so always return false - return false, nil -} - -// retryCAS retries the given operation with exponential backoff if the user -// didn't provide a version. This is intended to hide failures when the user -// isn't intentionally performing a CAS operation (all writes are, by design, -// CAS operations at the storage backend layer). -func (s *Server) retryCAS(ctx context.Context, vsn string, cas func() error) error { - if vsn != "" { - return cas() - } - - const maxAttempts = 5 - - // These parameters are fairly arbitrary, so if you find better ones then go - // ahead and swap them out! In general, we want to wait long enough to smooth - // over small amounts of storage replication lag, but not so long that we make - // matters worse by holding onto load. 
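For context on how the removed retryCAS helper was intended to be used, a hedged sketch assuming a Write-style handler holding a *pbresource.WriteRequest named req:

    err := s.retryCAS(ctx, req.Resource.Version, func() error {
        _, werr := s.Backend.WriteCAS(ctx, req.Resource)
        return werr
    })
    // With an empty Version the callback is retried on storage.ErrCASFailure using the
    // backoff below; with an explicit Version it is attempted exactly once.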
- backoff := &retry.Waiter{ - MinWait: 50 * time.Millisecond, - MaxWait: 1 * time.Second, - Jitter: retry.NewJitter(50), - Factor: 75 * time.Millisecond, - } - - var err error - for i := 1; i <= maxAttempts; i++ { - if err = cas(); !errors.Is(err, storage.ErrCASFailure) { - break - } - if backoff.Wait(ctx) != nil { - break - } - s.Logger.Trace("retrying failed CAS operation", "failure_count", i) - } - return err -} - func clone[T proto.Message](v T) T { return proto.Clone(v).(T) } diff --git a/agent/grpc-external/services/resource/server_ce.go b/agent/grpc-external/services/resource/server_ce.go deleted file mode 100644 index 6b2551b06b9e4..0000000000000 --- a/agent/grpc-external/services/resource/server_ce.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package resource - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" -) - -func v2TenancyToV1EntMeta(tenancy *pbresource.Tenancy) *acl.EnterpriseMeta { - return acl.DefaultEnterpriseMeta() -} - -func v1EntMetaToV2Tenancy(reg *resource.Registration, entMeta *acl.EnterpriseMeta, tenancy *pbresource.Tenancy) { - if (reg.Scope == resource.ScopeNamespace || reg.Scope == resource.ScopePartition) && tenancy.Partition == "" { - tenancy.Partition = entMeta.PartitionOrDefault() - } - - if reg.Scope == resource.ScopeNamespace && tenancy.Namespace == "" { - tenancy.Namespace = entMeta.NamespaceOrDefault() - } -} - -// checkV2Tenancy returns FailedPrecondition error for namespace resource type -// when the "v2tenancy" feature flag is not enabled. -func checkV2Tenancy(useV2Tenancy bool, rtype *pbresource.Type) error { - if resource.EqualType(rtype, pbtenancy.NamespaceType) && !useV2Tenancy { - return status.Errorf(codes.FailedPrecondition, "use of the v2 namespace resource requires the \"v2tenancy\" feature flag") - } - return nil -} - -type Config struct { - Logger hclog.Logger - Registry Registry - - // Backend is the storage backend that will be used for resource persistence. - Backend Backend - ACLResolver ACLResolver - // TenancyBridge temporarily allows us to use V1 implementations of - // partitions and namespaces until V2 implementations are available. - TenancyBridge TenancyBridge - - // UseV2Tenancy is true if the "v2tenancy" experiment is active, false otherwise. - // Attempts to create v2 tenancy resources (partition or namespace) will fail when the - // flag is false. - UseV2Tenancy bool -} - -// FeatureCheck does not apply to the community edition. -func (s *Server) FeatureCheck(reg *resource.Registration) error { - return nil -} diff --git a/agent/grpc-external/services/resource/server_ce_test.go b/agent/grpc-external/services/resource/server_ce_test.go deleted file mode 100644 index f48ff3b52b992..0000000000000 --- a/agent/grpc-external/services/resource/server_ce_test.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package resource_test - -import "github.com/hashicorp/consul/acl" - -func fillEntMeta(entMeta *acl.EnterpriseMeta) { - return -} - -func fillAuthorizerContext(authzContext *acl.AuthorizerContext) { - return -} diff --git a/agent/grpc-external/services/resource/server_test.go b/agent/grpc-external/services/resource/server_test.go index f069296c88bc3..a92fff38a3266 100644 --- a/agent/grpc-external/services/resource/server_test.go +++ b/agent/grpc-external/services/resource/server_test.go @@ -1,30 +1,25 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" "fmt" - "sync/atomic" "testing" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" "github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/resource/demo" - "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/internal/storage/inmem" "github.com/hashicorp/consul/proto-public/pbresource" pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2" @@ -55,51 +50,27 @@ func AuthorizerFrom(t *testing.T, policyStrs ...string) resolver.Result { } } -// Deprecated: use NewResourceServiceBuilder instead -func testServer(t *testing.T) *svc.Server { +func testServer(t *testing.T) *Server { t.Helper() backend, err := inmem.NewBackend() require.NoError(t, err) go backend.Run(testContext(t)) - // Mock the ACL Resolver to "allow all" for testing. - mockACLResolver := &svc.MockACLResolver{} + // Mock the ACL Resolver to allow everything for testing + mockACLResolver := &MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(testutils.ACLsDisabled(t), nil). - Run(func(args mock.Arguments) { - // Caller expecting passed in tokenEntMeta and authorizerContext to be filled in. - tokenEntMeta := args.Get(1).(*acl.EnterpriseMeta) - if tokenEntMeta != nil { - fillEntMeta(tokenEntMeta) - } + Return(testutils.ACLsDisabled(t), nil) - authzContext := args.Get(2).(*acl.AuthorizerContext) - if authzContext != nil { - fillAuthorizerContext(authzContext) - } - }) - - // Mock the tenancy bridge since we can't use the real thing. 
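Tests that need tenancies beyond default/default extend the same mock, mirroring what the MutateAndValidate deletion test and the (removed) testing builder do; a condensed sketch for a hypothetical ap1/ns1 tenancy:

    bridge := &svc.MockTenancyBridge{}
    bridge.On("PartitionExists", "ap1").Return(true, nil)
    bridge.On("NamespaceExists", "ap1", "ns1").Return(true, nil)
    bridge.On("IsPartitionMarkedForDeletion", "ap1").Return(false, nil)
    bridge.On("IsNamespaceMarkedForDeletion", "ap1", "ns1").Return(false, nil)
    server.TenancyBridge = bridge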
- mockTenancyBridge := &svc.MockTenancyBridge{} - mockTenancyBridge.On("PartitionExists", resource.DefaultPartitionName).Return(true, nil) - mockTenancyBridge.On("NamespaceExists", resource.DefaultPartitionName, resource.DefaultNamespaceName).Return(true, nil) - mockTenancyBridge.On("PartitionExists", mock.Anything).Return(false, nil) - mockTenancyBridge.On("NamespaceExists", mock.Anything, mock.Anything).Return(false, nil) - mockTenancyBridge.On("IsPartitionMarkedForDeletion", resource.DefaultPartitionName).Return(false, nil) - mockTenancyBridge.On("IsNamespaceMarkedForDeletion", resource.DefaultPartitionName, resource.DefaultNamespaceName).Return(false, nil) - - return svc.NewServer(svc.Config{ - Logger: testutil.Logger(t), - Registry: resource.NewRegistry(), - Backend: backend, - ACLResolver: mockACLResolver, - TenancyBridge: mockTenancyBridge, + return NewServer(Config{ + Logger: testutil.Logger(t), + Registry: resource.NewRegistry(), + Backend: backend, + ACLResolver: mockACLResolver, }) } -// Deprecated: use NewResourceServiceBuilder instead -func testClient(t *testing.T, server *svc.Server) pbresource.ResourceServiceClient { +func testClient(t *testing.T, server *Server) pbresource.ResourceServiceClient { t.Helper() addr := testutils.RunTestServer(t, server) @@ -136,155 +107,3 @@ func modifyArtist(t *testing.T, res *pbresource.Resource) *pbresource.Resource { res.Data = data return res } - -// wildcardTenancyCases returns permutations of tenancy and type scope used as input -// to endpoints that accept wildcards for tenancy. -func wildcardTenancyCases() map[string]struct { - typ *pbresource.Type - tenancy *pbresource.Tenancy -} { - return map[string]struct { - typ *pbresource.Type - tenancy *pbresource.Tenancy - }{ - "namespaced type with empty partition": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: "", - Namespace: resource.DefaultNamespaceName, - }, - }, - "namespaced type with empty namespace": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: resource.DefaultPartitionName, - Namespace: "", - }, - }, - "namespaced type with empty partition and namespace": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: "", - Namespace: "", - }, - }, - "namespaced type with wildcard partition and empty namespace": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: "*", - Namespace: "", - }, - }, - "namespaced type with empty partition and wildcard namespace": { - typ: demo.TypeV2Artist, - tenancy: &pbresource.Tenancy{ - Partition: "", - Namespace: "*", - }, - }, - "partitioned type with empty partition": { - typ: demo.TypeV1RecordLabel, - tenancy: &pbresource.Tenancy{ - Partition: "", - Namespace: "", - }, - }, - "partitioned type with wildcard partition": { - typ: demo.TypeV1RecordLabel, - tenancy: &pbresource.Tenancy{ - Partition: "*", - }, - }, - "partitioned type with wildcard partition and namespace": { - typ: demo.TypeV1RecordLabel, - tenancy: &pbresource.Tenancy{ - Partition: "*", - Namespace: "*", - }, - }, - "cluster type with empty partition and namespace": { - typ: demo.TypeV1Executive, - tenancy: &pbresource.Tenancy{ - Partition: "", - Namespace: "", - }, - }, - - "cluster type with wildcard partition and namespace": { - typ: demo.TypeV1Executive, - tenancy: &pbresource.Tenancy{ - Partition: "*", - Namespace: "*", - }, - }, - } -} - -// tenancyCases returns permutations of valid tenancy structs in a resource id to use as inputs. 
-// - the id is for a recordLabel when the resource is partition scoped -// - the id is for an artist when the resource is namespace scoped -func tenancyCases() map[string]func(artistId, recordlabelId *pbresource.ID) *pbresource.ID { - tenancyCases := map[string]func(artistId, recordlabelId *pbresource.ID) *pbresource.ID{ - "namespaced resource provides nonempty partition and namespace": func(artistId, recordLabelId *pbresource.ID) *pbresource.ID { - return artistId - }, - "namespaced resource inherits tokens partition when empty": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = "" - return id - }, - "namespaced resource inherits tokens namespace when empty": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Namespace = "" - return id - }, - "namespaced resource inherits tokens partition and namespace when empty": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy.Partition = "" - id.Tenancy.Namespace = "" - return id - }, - "namespaced resource inherits tokens partition and namespace when tenancy nil": func(artistId, _ *pbresource.ID) *pbresource.ID { - id := clone(artistId) - id.Tenancy = nil - return id - }, - "partitioned resource provides nonempty partition": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - return recordLabelId - }, - "partitioned resource inherits tokens partition when empty": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy.Partition = "" - return id - }, - "partitioned resource inherits tokens partition when tenancy nil": func(_, recordLabelId *pbresource.ID) *pbresource.ID { - id := clone(recordLabelId) - id.Tenancy = nil - return id - }, - } - return tenancyCases -} - -type blockOnceBackend struct { - storage.Backend - - done uint32 - readCompletedCh chan struct{} - blockCh chan struct{} -} - -func (b *blockOnceBackend) Read(ctx context.Context, consistency storage.ReadConsistency, id *pbresource.ID) (*pbresource.Resource, error) { - res, err := b.Backend.Read(ctx, consistency, id) - - // Block for exactly one call to Read. All subsequent calls (including those - // concurrent to the blocked call) will return immediately. - if atomic.CompareAndSwapUint32(&b.done, 0, 1) { - close(b.readCompletedCh) - <-b.blockCh - } - - return res, err -} - -func clone[T proto.Message](v T) T { return proto.Clone(v).(T) } diff --git a/agent/grpc-external/services/resource/testing/builder.go b/agent/grpc-external/services/resource/testing/builder.go deleted file mode 100644 index 17a99be040221..0000000000000 --- a/agent/grpc-external/services/resource/testing/builder.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package testing - -import ( - "context" - - "github.com/fullstorydev/grpchan/inprocgrpc" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/acl" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - "github.com/hashicorp/consul/agent/grpc-external/testutils" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/storage/inmem" - "github.com/hashicorp/consul/internal/tenancy" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -// NewResourceServiceBuilder is the preferred way to configure and run -// an isolated in-process instance of the resource service for unit -// testing. The final call to `Run()` returns a client you can use for -// making requests. -func NewResourceServiceBuilder() *Builder { - b := &Builder{ - useV2Tenancy: false, - registry: resource.NewRegistry(), - // Regardless of whether using mock of v2tenancy, always make sure - // the builtin tenancy exists. - tenancies: []*pbresource.Tenancy{resource.DefaultNamespacedTenancy()}, - cloning: true, - } - return b -} - -// WithV2Tenancy configures which tenancy bridge is used. -// -// true => real v2 default partition and namespace via v2 tenancy bridge -// false => mock default partition and namespace since v1 tenancy bridge can't be used (not spinning up an entire server here) -func (b *Builder) WithV2Tenancy(useV2Tenancy bool) *Builder { - b.useV2Tenancy = useV2Tenancy - return b -} - -// Registry provides access to the constructed registry post-Run() when -// needed by other test dependencies. -func (b *Builder) Registry() resource.Registry { - return b.registry -} - -// ServiceImpl provides access to the actual server side implementation of the resource service. This should never be -// used/accessed without good reason. The current justifying use case is to monkeypatch the ACL resolver post-creation -// to allow unfettered writes which some ACL related tests require to put test data in place. -func (b *Builder) ServiceImpl() *svc.Server { - return b.serviceImpl -} - -func (b *Builder) WithRegisterFns(registerFns ...func(resource.Registry)) *Builder { - for _, registerFn := range registerFns { - b.registerFns = append(b.registerFns, registerFn) - } - return b -} - -func (b *Builder) WithACLResolver(aclResolver svc.ACLResolver) *Builder { - b.aclResolver = aclResolver - return b -} - -// WithTenancies adds additional partitions and namespaces if default/default -// is not sufficient. -func (b *Builder) WithTenancies(tenancies ...*pbresource.Tenancy) *Builder { - for _, tenancy := range tenancies { - b.tenancies = append(b.tenancies, tenancy) - } - return b -} - -// WithCloningDisabled disables resource service client functionality that will -// clone protobuf message types as they pass through. By default -// cloning is enabled. -// -// For in-process gRPC interactions we prefer to use an in-memory gRPC client. This -// allows our controller infrastructure to avoid any unnecessary protobuf serialization -// and deserialization and for controller caching to not duplicate memory that the -// resource service is already holding on to. However, clients (including controllers) -// often want to be able to perform read-modify-write ops and for the sake of not -// forcing all call sites to be aware of the shared memory and to not touch it we -// enable cloning in the clients that we give to those bits of code. 
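The builder is consumed by the tests earlier in this patch roughly as follows (a sketch; WithCloningDisabled is only for controller-style callers prepared to treat returned messages as shared, read-only memory):

    client := svctest.NewResourceServiceBuilder().
        WithRegisterFns(demo.RegisterTypes).
        WithCloningDisabled().
        Run(t)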
-func (b *Builder) WithCloningDisabled() *Builder { - b.cloning = false - return b -} - -// Run starts the resource service and returns a client. -func (b *Builder) Run(t testutil.TestingTB) pbresource.ResourceServiceClient { - // backend cannot be customized - backend, err := inmem.NewBackend() - require.NoError(t, err) - - // start the backend and add teardown hook - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - go backend.Run(ctx) - - // Automatically add tenancy types if v2 tenancy enabled - if b.useV2Tenancy { - b.registerFns = append(b.registerFns, tenancy.RegisterTypes) - } - - for _, registerFn := range b.registerFns { - registerFn(b.registry) - } - - var tenancyBridge resource.TenancyBridge - if !b.useV2Tenancy { - // use mock tenancy bridge. default/default has already been added out of the box - mockTenancyBridge := &svc.MockTenancyBridge{} - - for _, tenancy := range b.tenancies { - mockTenancyBridge.On("PartitionExists", tenancy.Partition).Return(true, nil) - mockTenancyBridge.On("NamespaceExists", tenancy.Partition, tenancy.Namespace).Return(true, nil) - mockTenancyBridge.On("IsPartitionMarkedForDeletion", tenancy.Partition).Return(false, nil) - mockTenancyBridge.On("IsNamespaceMarkedForDeletion", tenancy.Partition, tenancy.Namespace).Return(false, nil) - } - - tenancyBridge = mockTenancyBridge - } else { - // use v2 tenancy bridge. population comes later after client injected. - tenancyBridge = tenancy.NewV2TenancyBridge() - } - - if b.aclResolver == nil { - // When not provided (regardless of V1 tenancy or V2 tenancy), configure an ACL resolver - // that has ACLs disabled and fills in "default" for the partition and namespace when - // not provided. This is similar to user initiated requests. - // - // Controllers under test should be providing full tenancy since they will run with the DANGER_NO_AUTH. - mockACLResolver := &svc.MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(testutils.ACLsDisabled(t), nil). - Run(func(args mock.Arguments) { - // Caller expecting passed in tokenEntMeta and authorizerContext to be filled in. - tokenEntMeta := args.Get(1).(*acl.EnterpriseMeta) - if tokenEntMeta != nil { - FillEntMeta(tokenEntMeta) - } - - authzContext := args.Get(2).(*acl.AuthorizerContext) - if authzContext != nil { - FillAuthorizerContext(authzContext) - } - }) - b.aclResolver = mockACLResolver - } - - // ent only - b.ensureLicenseManager() - - config := b.newConfig(testutil.Logger(t), backend, tenancyBridge) - - b.serviceImpl = svc.NewServer(*config) - ch := &inprocgrpc.Channel{} - pbresource.RegisterResourceServiceServer(ch, b.serviceImpl) - client := pbresource.NewResourceServiceClient(ch) - - if b.cloning { - // enable protobuf cloning wrapper - client = pbresource.NewCloningResourceServiceClient(client) - } - - // HACK ALERT: The client needs to be injected into the V2TenancyBridge - // after it has been created due the the circular dependency. This will - // go away when the tenancy bridge is removed and V1 is no more, however - // long that takes. 
- switch config.TenancyBridge.(type) { - case *tenancy.V2TenancyBridge: - config.TenancyBridge.(*tenancy.V2TenancyBridge).WithClient(client) - // Default partition and namespace can finally be created - require.NoError(t, initTenancy(ctx, backend)) - - for _, tenancy := range b.tenancies { - if tenancy.Partition == resource.DefaultPartitionName && tenancy.Namespace == resource.DefaultNamespaceName { - continue - } - t.Fatalf("TODO: implement creation of passed in v2 tenancy: %v", tenancy) - } - } - return client -} diff --git a/agent/grpc-external/services/resource/testing/builder_ce.go b/agent/grpc-external/services/resource/testing/builder_ce.go deleted file mode 100644 index d7f9a7c733025..0000000000000 --- a/agent/grpc-external/services/resource/testing/builder_ce.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package testing - -import ( - "github.com/hashicorp/go-hclog" - - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type Builder struct { - registry resource.Registry - registerFns []func(resource.Registry) - useV2Tenancy bool - tenancies []*pbresource.Tenancy - aclResolver svc.ACLResolver - serviceImpl *svc.Server - cloning bool -} - -func (b *Builder) ensureLicenseManager() { -} - -func (b *Builder) newConfig(logger hclog.Logger, backend svc.Backend, tenancyBridge resource.TenancyBridge) *svc.Config { - return &svc.Config{ - Logger: logger, - Registry: b.registry, - Backend: backend, - ACLResolver: b.aclResolver, - TenancyBridge: tenancyBridge, - UseV2Tenancy: b.useV2Tenancy, - } -} diff --git a/agent/grpc-external/services/resource/testing/testing.go b/agent/grpc-external/services/resource/testing/testing.go index 906953b034af1..13f579417b026 100644 --- a/agent/grpc-external/services/resource/testing/testing.go +++ b/agent/grpc-external/services/resource/testing/testing.go @@ -4,37 +4,59 @@ package testing import ( + "context" "testing" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" - "github.com/hashicorp/go-uuid" - - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" - "github.com/hashicorp/consul/agent/structs" + svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" + internal "github.com/hashicorp/consul/agent/grpc-internal" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage/inmem" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" ) -func randomACLIdentity(t *testing.T) structs.ACLIdentity { - id, err := uuid.GenerateUUID() +// RunResourceService runs a Resource Service for the duration of the test and +// returns a client to interact with it. ACLs will be disabled. 
+func RunResourceService(t *testing.T, registerFns ...func(resource.Registry)) pbresource.ResourceServiceClient { + t.Helper() + + backend, err := inmem.NewBackend() require.NoError(t, err) - return &structs.ACLToken{AccessorID: id} -} + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go backend.Run(ctx) -func AuthorizerFrom(t *testing.T, policyStrs ...string) resolver.Result { - policies := []*acl.Policy{} - for _, policyStr := range policyStrs { - policy, err := acl.NewPolicyFromSource(policyStr, nil, nil) - require.NoError(t, err) - policies = append(policies, policy) + registry := resource.NewRegistry() + for _, fn := range registerFns { + fn(registry) } - authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), policies, nil) + server := grpc.NewServer() + + svc.NewServer(svc.Config{ + Backend: backend, + Registry: registry, + Logger: testutil.Logger(t), + ACLResolver: resolver.DANGER_NO_AUTH{}, + }).Register(server) + + pipe := internal.NewPipeListener() + go server.Serve(pipe) + t.Cleanup(server.Stop) + + conn, err := grpc.Dial("", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(pipe.DialContext), + grpc.WithBlock(), + ) require.NoError(t, err) + t.Cleanup(func() { _ = conn.Close() }) - return resolver.Result{ - Authorizer: authz, - ACLIdentity: randomACLIdentity(t), - } + return pbresource.NewResourceServiceClient(conn) } diff --git a/agent/grpc-external/services/resource/testing/testing_ce.go b/agent/grpc-external/services/resource/testing/testing_ce.go deleted file mode 100644 index 926acf6d38f74..0000000000000 --- a/agent/grpc-external/services/resource/testing/testing_ce.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package testing - -import ( - "context" - "errors" - "time" - - "github.com/oklog/ulid/v2" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/internal/storage" - "github.com/hashicorp/consul/internal/storage/inmem" - "github.com/hashicorp/consul/proto-public/pbresource" - pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" -) - -func FillEntMeta(entMeta *acl.EnterpriseMeta) { - // nothing to to in CE. -} - -func FillAuthorizerContext(authzContext *acl.AuthorizerContext) { - // nothing to to in CE. -} - -// initTenancy creates the builtin v2 namespace resource only. The builtin -// v2 partition is not created because we're in CE. 
-func initTenancy(ctx context.Context, b *inmem.Backend) error { - nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"}) - if err != nil { - return err - } - nsID := &pbresource.ID{ - Type: pbtenancy.NamespaceType, - Name: resource.DefaultNamespaceName, - Tenancy: resource.DefaultPartitionedTenancy(), - Uid: ulid.Make().String(), - } - read, err := b.Read(ctx, storage.StrongConsistency, nsID) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return err - } - if read == nil && errors.Is(err, storage.ErrNotFound) { - _, err = b.WriteCAS(ctx, &pbresource.Resource{ - Id: nsID, - Generation: ulid.Make().String(), - Data: nsData, - Metadata: map[string]string{ - "generated_at": time.Now().Format(time.RFC3339), - }, - }) - if err != nil { - return err - } - } - return nil -} diff --git a/agent/grpc-external/services/resource/watch.go b/agent/grpc-external/services/resource/watch.go index 511802f2cc206..35ec14513ac37 100644 --- a/agent/grpc-external/services/resource/watch.go +++ b/agent/grpc-external/services/resource/watch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -10,27 +10,28 @@ import ( "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" ) func (s *Server) WatchList(req *pbresource.WatchListRequest, stream pbresource.ResourceService_WatchListServer) error { - reg, err := s.ensureWatchListRequestValid(req) + if err := validateWatchListRequest(req); err != nil { + return err + } + + // check type exists + reg, err := s.resolveType(req.Type) if err != nil { return err } - // v1 ACL subsystem is "wildcard" aware so just pass on through. - entMeta := v2TenancyToV1EntMeta(req.Tenancy) - token := tokenFromContext(stream.Context()) - authz, authzContext, err := s.getAuthorizer(token, entMeta) + authz, err := s.getAuthorizer(tokenFromContext(stream.Context())) if err != nil { return err } - // Check list ACL. - err = reg.ACLs.List(authz, authzContext) + // check acls + err = reg.ACLs.List(authz, req.Tenancy) switch { case acl.IsErrPermissionDenied(err): return status.Error(codes.PermissionDenied, err.Error()) @@ -38,9 +39,6 @@ func (s *Server) WatchList(req *pbresource.WatchListRequest, stream pbresource.R return status.Errorf(codes.Internal, "failed list acl: %v", err) } - // Ensure we're defaulting correctly when request tenancy units are empty. - v1EntMetaToV2Tenancy(reg, entMeta, req.Tenancy) - unversionedType := storage.UnversionedTypeFrom(req.Type) watch, err := s.Backend.WatchList( stream.Context(), @@ -62,41 +60,13 @@ func (s *Server) WatchList(req *pbresource.WatchListRequest, stream pbresource.R return status.Errorf(codes.Internal, "failed next: %v", err) } - var resource *pbresource.Resource - switch { - case event.GetUpsert() != nil: - resource = event.GetUpsert().GetResource() - case event.GetDelete() != nil: - resource = event.GetDelete().GetResource() - case event.GetEndOfSnapshot() != nil: - // skip the rest and send the event. - if err = stream.Send(event); err != nil { - return err - } - continue - default: - // skip unknown type of operation - continue - } - - // From here on out we assume the event is operating on a non-nil resource. 
- // drop group versions that don't match - if resource.Id.Type.GroupVersion != req.Type.GroupVersion { + if event.Resource.Id.Type.GroupVersion != req.Type.GroupVersion { continue } - // Need to rebuild authorizer per resource since wildcard inputs may - // result in different tenancies. Consider caching per tenancy if this - // is deemed expensive. - entMeta = v2TenancyToV1EntMeta(resource.Id.Tenancy) - authz, authzContext, err = s.getAuthorizer(token, entMeta) - if err != nil { - return err - } - // filter out items that don't pass read ACLs - err = reg.ACLs.Read(authz, authzContext, resource.Id, resource) + err = reg.ACLs.Read(authz, event.Resource.Id) switch { case acl.IsErrPermissionDenied(err): continue @@ -110,57 +80,15 @@ func (s *Server) WatchList(req *pbresource.WatchListRequest, stream pbresource.R } } -func (s *Server) ensureWatchListRequestValid(req *pbresource.WatchListRequest) (*resource.Registration, error) { - if req.Type == nil { - return nil, status.Errorf(codes.InvalidArgument, "type is required") - } - - // Check type exists. - reg, err := s.resolveType(req.Type) - if err != nil { - return nil, err - } - - // Ignore return value since read ops are allowed but will log a warning if the feature is - // not enabled in the license. - _ = s.FeatureCheck(reg) - - // if no tenancy is passed defaults to wildcard - if req.Tenancy == nil { - req.Tenancy = wildcardTenancyFor(reg.Scope) - } - - if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil { - return nil, err - } - - if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { - return nil, err - } - - // Check scope - if err = validateScopedTenancy(reg.Scope, req.Type, req.Tenancy, true); err != nil { - return nil, err - } - - return reg, nil -} - -func wildcardTenancyFor(scope resource.Scope) *pbresource.Tenancy { - var defaultTenancy *pbresource.Tenancy - - switch scope { - case resource.ScopeCluster: - defaultTenancy = &pbresource.Tenancy{} - case resource.ScopePartition: - defaultTenancy = &pbresource.Tenancy{ - Partition: storage.Wildcard, - } +func validateWatchListRequest(req *pbresource.WatchListRequest) error { + var field string + switch { + case req.Type == nil: + field = "type" + case req.Tenancy == nil: + field = "tenancy" default: - defaultTenancy = &pbresource.Tenancy{ - Partition: storage.Wildcard, - Namespace: storage.Wildcard, - } + return nil } - return defaultTenancy + return status.Errorf(codes.InvalidArgument, "%s is required", field) } diff --git a/agent/grpc-external/services/resource/watch_test.go b/agent/grpc-external/services/resource/watch_test.go index 5ccdb609babad..95695f295ebd4 100644 --- a/agent/grpc-external/services/resource/watch_test.go +++ b/agent/grpc-external/services/resource/watch_test.go @@ -1,102 +1,44 @@ // Copyright (c) HashiCorp, Inc. 
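[Illustrative sketch, not part of the patch] For orientation before the watch_test.go changes that follow: against the restored API shape, a caller consumes WatchList roughly as below (ctx, client, and the demo types are assumed from the surrounding tests):

    stream, err := client.WatchList(ctx, &pbresource.WatchListRequest{
        Type:       demo.TypeV2Artist,
        Tenancy:    demo.TenancyDefault,
        NamePrefix: "",
    })
    require.NoError(t, err)

    for {
        event, err := stream.Recv()
        if err != nil {
            break // stream closed or failed
        }
        switch event.Operation {
        case pbresource.WatchEvent_OPERATION_UPSERT:
            _ = event.Resource // created or updated resource
        case pbresource.WatchEvent_OPERATION_DELETE:
            _ = event.Resource // affected resource at deletion
        }
    }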
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" "errors" "io" - "strings" "testing" "time" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/agent/grpc-external/testutils" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" -) -// TODO: Update all tests to use true/false table test for v2tenancy + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) func TestWatchList_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) - type testCase struct { - modFn func(*pbresource.WatchListRequest) - errContains string - } + demo.RegisterTypes(server.Registry) - testCases := map[string]testCase{ - "no type": { - modFn: func(req *pbresource.WatchListRequest) { req.Type = nil }, - errContains: "type is required", - }, - "partition mixed case": { - modFn: func(req *pbresource.WatchListRequest) { req.Tenancy.Partition = "Default" }, - errContains: "tenancy.partition invalid", - }, - "partition too long": { - modFn: func(req *pbresource.WatchListRequest) { - req.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - }, - errContains: "tenancy.partition invalid", - }, - "namespace mixed case": { - modFn: func(req *pbresource.WatchListRequest) { req.Tenancy.Namespace = "Default" }, - errContains: "tenancy.namespace invalid", - }, - "namespace too long": { - modFn: func(req *pbresource.WatchListRequest) { - req.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - }, - errContains: "tenancy.namespace invalid", - }, - "name_prefix mixed case": { - modFn: func(req *pbresource.WatchListRequest) { req.NamePrefix = "Smashing" }, - errContains: "name_prefix invalid", - }, - "partitioned type provides non-empty namespace": { - modFn: func(req *pbresource.WatchListRequest) { - req.Type = demo.TypeV1RecordLabel - req.Tenancy.Namespace = "bad" - }, - errContains: "cannot have a namespace", - }, - "cluster scope with non-empty partition": { - modFn: func(req *pbresource.WatchListRequest) { - req.Type = demo.TypeV1Executive - req.Tenancy = &pbresource.Tenancy{Partition: "bad"} - }, - errContains: "cannot have a partition", - }, - "cluster scope with non-empty namespace": { - modFn: func(req *pbresource.WatchListRequest) { - req.Type = demo.TypeV1Executive - req.Tenancy = &pbresource.Tenancy{Namespace: "bad"} - }, - errContains: "cannot have a namespace", - }, + testCases := map[string]func(*pbresource.WatchListRequest){ + "no type": func(req *pbresource.WatchListRequest) { req.Type = nil }, + "no tenancy": func(req *pbresource.WatchListRequest) { req.Tenancy = nil }, } - for desc, tc := range testCases { + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { req := &pbresource.WatchListRequest{ Type: demo.TypeV2Album, - Tenancy: 
resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, } - tc.modFn(req) + modFn(req) stream, err := client.WatchList(testContext(t), req) require.NoError(t, err) @@ -104,7 +46,6 @@ func TestWatchList_InputValidation(t *testing.T) { _, err = stream.Recv() require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } @@ -112,11 +53,12 @@ func TestWatchList_InputValidation(t *testing.T) { func TestWatchList_TypeNotFound(t *testing.T) { t.Parallel() - client := svctest.NewResourceServiceBuilder().Run(t) + server := testServer(t) + client := testClient(t, server) stream, err := client.WatchList(context.Background(), &pbresource.WatchListRequest{ Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.NoError(t, err) @@ -130,104 +72,43 @@ func TestWatchList_TypeNotFound(t *testing.T) { func TestWatchList_GroupVersionMatches(t *testing.T) { t.Parallel() - b := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes) - client := b.Run(t) - + server := testServer(t) + client := testClient(t, server) + demo.RegisterTypes(server.Registry) ctx := context.Background() // create a watch stream, err := client.WatchList(ctx, &pbresource.WatchListRequest{ Type: demo.TypeV2Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.NoError(t, err) rspCh := handleResourceStream(t, stream) - mustGetEndOfSnapshot(t, rspCh) - artist, err := demo.GenerateV2Artist() require.NoError(t, err) // insert and verify upsert event received - r1Resp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) - r1 := r1Resp.Resource + r1, err := server.Backend.WriteCAS(ctx, artist) require.NoError(t, err) rsp := mustGetResource(t, rspCh) - require.NotNil(t, rsp.GetUpsert()) - prototest.AssertDeepEqual(t, r1, rsp.GetUpsert().Resource) + require.Equal(t, pbresource.WatchEvent_OPERATION_UPSERT, rsp.Operation) + prototest.AssertDeepEqual(t, r1, rsp.Resource) // update and verify upsert event received r2 := modifyArtist(t, r1) - r2Resp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: r2}) + r2, err = server.Backend.WriteCAS(ctx, r2) require.NoError(t, err) - r2 = r2Resp.Resource rsp = mustGetResource(t, rspCh) - require.NotNil(t, rsp.GetUpsert()) - prototest.AssertDeepEqual(t, r2, rsp.GetUpsert().Resource) + require.Equal(t, pbresource.WatchEvent_OPERATION_UPSERT, rsp.Operation) + prototest.AssertDeepEqual(t, r2, rsp.Resource) // delete and verify delete event received - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: r2.Id, Version: r2.Version}) + err = server.Backend.DeleteCAS(ctx, r2.Id, r2.Version) require.NoError(t, err) rsp = mustGetResource(t, rspCh) - require.NotNil(t, rsp.GetDelete()) - prototest.AssertDeepEqual(t, r2.Id, rsp.GetDelete().Resource.Id) -} - -func TestWatchList_Tenancy_Defaults_And_Normalization(t *testing.T) { - // Test units of tenancy get lowercased and defaulted correctly when empty. - for desc, tc := range wildcardTenancyCases() { - t.Run(desc, func(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a watch. 
- stream, err := client.WatchList(ctx, &pbresource.WatchListRequest{ - Type: tc.typ, - Tenancy: tc.tenancy, - NamePrefix: "", - }) - require.NoError(t, err) - rspCh := handleResourceStream(t, stream) - - mustGetEndOfSnapshot(t, rspCh) - - // Testcase will pick one of executive, recordLabel or artist based on scope of type. - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - executive, err := demo.GenerateV1Executive("king-arthur", "CEO") - require.NoError(t, err) - - // Create and verify upsert event received. - rlRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - artistRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - executiveRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: executive}) - require.NoError(t, err) - - var expected *pbresource.Resource - switch { - case resource.EqualType(tc.typ, demo.TypeV1RecordLabel): - expected = rlRsp.Resource - case resource.EqualType(tc.typ, demo.TypeV2Artist): - expected = artistRsp.Resource - case resource.EqualType(tc.typ, demo.TypeV1Executive): - expected = executiveRsp.Resource - default: - require.Fail(t, "unsupported type", tc.typ) - } - - rsp := mustGetResource(t, rspCh) - require.NotNil(t, rsp.GetUpsert()) - prototest.AssertDeepEqual(t, expected, rsp.GetUpsert().Resource) - }) - } + require.Equal(t, pbresource.WatchEvent_OPERATION_DELETE, rsp.Operation) } func TestWatchList_GroupVersionMismatch(t *testing.T) { @@ -236,39 +117,34 @@ func TestWatchList_GroupVersionMismatch(t *testing.T) { // Then no watch events should be emitted t.Parallel() - b := svctest.NewResourceServiceBuilder(). 
- WithRegisterFns(demo.RegisterTypes) - client := b.Run(t) - + server := testServer(t) + demo.RegisterTypes(server.Registry) + client := testClient(t, server) ctx := context.Background() // create a watch for TypeArtistV1 stream, err := client.WatchList(ctx, &pbresource.WatchListRequest{ Type: demo.TypeV1Artist, - Tenancy: resource.DefaultNamespacedTenancy(), + Tenancy: demo.TenancyDefault, NamePrefix: "", }) require.NoError(t, err) rspCh := handleResourceStream(t, stream) - mustGetEndOfSnapshot(t, rspCh) - artist, err := demo.GenerateV2Artist() require.NoError(t, err) // insert - r1Resp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) + r1, err := server.Backend.WriteCAS(ctx, artist) require.NoError(t, err) - r1 := r1Resp.Resource // update r2 := clone(r1) - r2Resp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: r2}) + r2, err = server.Backend.WriteCAS(ctx, r2) require.NoError(t, err) - r2 = r2Resp.Resource // delete - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: r2.Id, Version: r2.Version}) + err = server.Backend.DeleteCAS(ctx, r2.Id, r2.Version) require.NoError(t, err) // verify no events received @@ -280,7 +156,7 @@ func TestWatchList_ACL_ListDenied(t *testing.T) { t.Parallel() // deny all - rspCh, _ := roundTripACL(t, testutils.ACLNoPermissions(t), true) + rspCh, _ := roundTripACL(t, testutils.ACLNoPermissions(t)) // verify key:list denied err := mustGetError(t, rspCh) @@ -298,7 +174,7 @@ func TestWatchList_ACL_ListAllowed_ReadDenied(t *testing.T) { key_prefix "resource/" { policy = "list" } key_prefix "resource/demo.v2.Artist/" { policy = "deny" } `) - rspCh, _ := roundTripACL(t, authz, false) + rspCh, _ := roundTripACL(t, authz) // verify resource filtered out by key:read denied, hence no events mustGetNoResource(t, rspCh) @@ -313,26 +189,23 @@ func TestWatchList_ACL_ListAllowed_ReadAllowed(t *testing.T) { key_prefix "resource/" { policy = "list" } key_prefix "resource/demo.v2.Artist/" { policy = "read" } `) - rspCh, artist := roundTripACL(t, authz, false) + rspCh, artist := roundTripACL(t, authz) // verify resource not filtered out by acl event := mustGetResource(t, rspCh) - - require.NotNil(t, event.GetUpsert()) - prototest.AssertDeepEqual(t, artist, event.GetUpsert().Resource) + prototest.AssertDeepEqual(t, artist, event.Resource) } // roundtrip a WatchList which attempts to stream back a single write event -func roundTripACL(t *testing.T, authz acl.Authorizer, expectErr bool) (<-chan resourceOrError, *pbresource.Resource) { - mockACLResolver := &svc.MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(resolver.Result{Authorizer: authz}, nil) +func roundTripACL(t *testing.T, authz acl.Authorizer) (<-chan resourceOrError, *pbresource.Resource) { + server := testServer(t) + client := testClient(t, server) - b := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithACLResolver(mockACLResolver) - client := b.Run(t) - server := b.ServiceImpl() + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). 
+ Return(authz, nil) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -345,10 +218,6 @@ func roundTripACL(t *testing.T, authz acl.Authorizer, expectErr bool) (<-chan re require.NoError(t, err) rspCh := handleResourceStream(t, stream) - if !expectErr { - mustGetEndOfSnapshot(t, rspCh) - } - // induce single watch event artist, err = server.Backend.WriteCAS(context.Background(), artist) require.NoError(t, err) @@ -369,11 +238,6 @@ func mustGetNoResource(t *testing.T, ch <-chan resourceOrError) { } } -func mustGetEndOfSnapshot(t *testing.T, ch <-chan resourceOrError) { - event := mustGetResource(t, ch) - require.NotNil(t, event.GetEndOfSnapshot(), "expected EndOfSnapshot but got got event %T", event.GetEvent()) -} - func mustGetResource(t *testing.T, ch <-chan resourceOrError) *pbresource.WatchEvent { t.Helper() @@ -425,33 +289,3 @@ type resourceOrError struct { rsp *pbresource.WatchEvent err error } - -func TestWatchList_NoTenancy(t *testing.T) { - t.Parallel() - - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a watch. - stream, err := client.WatchList(ctx, &pbresource.WatchListRequest{ - Type: demo.TypeV1RecordLabel, - }) - require.NoError(t, err) - rspCh := handleResourceStream(t, stream) - - mustGetEndOfSnapshot(t, rspCh) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - // Create and verify upsert event received. - rsp1, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) - require.NoError(t, err) - - rsp2 := mustGetResource(t, rspCh) - - require.NotNil(t, rsp2.GetUpsert()) - prototest.AssertDeepEqual(t, rsp1.Resource, rsp2.GetUpsert().Resource) -} diff --git a/agent/grpc-external/services/resource/write.go b/agent/grpc-external/services/resource/write.go index 99341b9ba52b3..34799ae8d82ec 100644 --- a/agent/grpc-external/services/resource/write.go +++ b/agent/grpc-external/services/resource/write.go @@ -1,20 +1,22 @@ // Copyright (c) HashiCorp, Inc. 
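[Illustrative sketch, not part of the patch] The write.go changes that follow restore the earlier Write handler; from a client's point of view, the two write modes it distinguishes look roughly like this (client, ctx, and artist as in the tests):

    // Non-CAS write: leave Version empty and the server substitutes the
    // currently stored version before attempting the backend CAS write.
    artist.Version = ""
    rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist})
    require.NoError(t, err)

    // CAS write: reuse the Version returned by the previous write (or a read);
    // the server rejects the request if the stored version has since changed.
    updated := rsp.Resource
    // ...mutate updated.Data here...
    _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: updated})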
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource import ( "context" "errors" + "strings" + "time" "github.com/oklog/ulid/v2" - "golang.org/x/exp/maps" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" + "github.com/hashicorp/consul/lib/retry" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -35,11 +37,49 @@ import ( var errUseWriteStatus = status.Error(codes.InvalidArgument, "resource.status can only be set using the WriteStatus endpoint") func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbresource.WriteResponse, error) { - tenancyMarkedForDeletion, err := s.mutateAndValidate(ctx, req.Resource, true) + if err := validateWriteRequest(req); err != nil { + return nil, err + } + + reg, err := s.resolveType(req.Resource.Id.Type) if err != nil { return nil, err } + authz, err := s.getAuthorizer(tokenFromContext(ctx)) + if err != nil { + return nil, err + } + + // check acls + err = reg.ACLs.Write(authz, req.Resource.Id) + switch { + case acl.IsErrPermissionDenied(err): + return nil, status.Error(codes.PermissionDenied, err.Error()) + case err != nil: + return nil, status.Errorf(codes.Internal, "failed write acl: %v", err) + } + + // Check the user sent the correct type of data. + if !req.Resource.Data.MessageIs(reg.Proto) { + got := strings.TrimPrefix(req.Resource.Data.TypeUrl, "type.googleapis.com/") + + return nil, status.Errorf( + codes.InvalidArgument, + "resource.data is of wrong type (expected=%q, got=%q)", + reg.Proto.ProtoReflect().Descriptor().FullName(), + got, + ) + } + + if err = reg.Validate(req.Resource); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + if err = reg.Mutate(req.Resource); err != nil { + return nil, status.Errorf(codes.Internal, "failed mutate hook: %v", err.Error()) + } + // At the storage backend layer, all writes are CAS operations. // // This makes it possible to *safely* do things like keeping the Uid stable @@ -79,16 +119,6 @@ func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbre return errUseWriteStatus } - // Reject creation in tenancy unit marked for deletion. - if tenancyMarkedForDeletion { - return status.Errorf(codes.InvalidArgument, "tenancy marked for deletion: %v", input.Id.Tenancy.String()) - } - - // Reject attempts to create a resource with a deletionTimestamp. - if resource.IsMarkedForDeletion(input) { - return status.Errorf(codes.InvalidArgument, "resource.metadata.%s can't be set on resource creation", resource.DeletionTimestampKey) - } - // Generally, we expect resources with owners to be created by controllers, // and they should provide the Uid. In cases where no Uid is given (e.g. the // owner is specified in the resource HCL) we'll look up whatever the current @@ -130,11 +160,9 @@ func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbre // just want to update the current resource. input.Id = existing.Id - // User is doing a non-CAS write, use the current version and preserve - // deferred deletion metadata if not present. + // User is doing a non-CAS write, use the current version. if input.Version == "" { input.Version = existing.Version - preserveDeferredDeletionMetadata(input, existing) } // Check the stored version matches the user-given version. 
@@ -172,13 +200,6 @@ func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbre return errUseWriteStatus } - // If the write is related to a deferred deletion (marking for deletion or removal - // of finalizers), make sure nothing else is changed. - if err := vetIfDeleteRelated(input, existing, tenancyMarkedForDeletion); err != nil { - return err - } - - // Otherwise, let the write continue default: return err } @@ -201,139 +222,64 @@ func (s *Server) Write(ctx context.Context, req *pbresource.WriteRequest) (*pbre return &pbresource.WriteResponse{Resource: result}, nil } -func ensureMetadataSameExceptFor(input *pbresource.Resource, existing *pbresource.Resource, ignoreKey string) error { - // Work on copies since we're mutating them - inputCopy := maps.Clone(input.Metadata) - existingCopy := maps.Clone(existing.Metadata) - - delete(inputCopy, ignoreKey) - delete(existingCopy, ignoreKey) - - if !maps.Equal(inputCopy, existingCopy) { - return status.Error(codes.InvalidArgument, "cannot modify metadata") - } - - return nil -} - -func ensureDataUnchanged(input *pbresource.Resource, existing *pbresource.Resource) error { - // Check data last since this could potentially be the most expensive comparison. - if !proto.Equal(input.Data, existing.Data) { - return status.Error(codes.InvalidArgument, "cannot modify data") - } - return nil -} - -// EnsureFinalizerRemoved ensures at least one finalizer was removed. -// TODO: only public for test to access -func EnsureFinalizerRemoved(input *pbresource.Resource, existing *pbresource.Resource) error { - inputFinalizers := resource.GetFinalizers(input) - existingFinalizers := resource.GetFinalizers(existing) - if !inputFinalizers.IsProperSubset(existingFinalizers) { - return status.Error(codes.InvalidArgument, "expected at least one finalizer to be removed") +// retryCAS retries the given operation with exponential backoff if the user +// didn't provide a version. This is intended to hide failures when the user +// isn't intentionally performing a CAS operation (all writes are, by design, +// CAS operations at the storage backend layer). +func (s *Server) retryCAS(ctx context.Context, vsn string, cas func() error) error { + if vsn != "" { + return cas() } - return nil -} - -func vetIfDeleteRelated(input, existing *pbresource.Resource, tenancyMarkedForDeletion bool) error { - // Keep track of whether this write is a normal write or a write that is related - // to deferred resource deletion involving setting the deletionTimestamp or the - // removal of finalizers. - deleteRelated := false - existingMarked := resource.IsMarkedForDeletion(existing) - inputMarked := resource.IsMarkedForDeletion(input) - - // Block removal of deletion timestamp - if !inputMarked && existingMarked { - return status.Errorf(codes.InvalidArgument, "cannot remove %s", resource.DeletionTimestampKey) - } - - // Block modification of existing deletion timestamp - if existing.Metadata[resource.DeletionTimestampKey] != "" && (existing.Metadata[resource.DeletionTimestampKey] != input.Metadata[resource.DeletionTimestampKey]) { - return status.Errorf(codes.InvalidArgument, "cannot modify %s", resource.DeletionTimestampKey) + const maxAttempts = 5 + + // These parameters are fairly arbitrary, so if you find better ones then go + // ahead and swap them out! In general, we want to wait long enough to smooth + // over small amounts of storage replication lag, but not so long that we make + // matters worse by holding onto load. 
+ backoff := &retry.Waiter{ + MinWait: 50 * time.Millisecond, + MaxWait: 1 * time.Second, + Jitter: retry.NewJitter(50), + Factor: 75 * time.Millisecond, } - // Block writes that do more than just adding a deletion timestamp - if inputMarked && !existingMarked { - deleteRelated = deleteRelated || true - // Verify rest of resource is unchanged - if err := ensureMetadataSameExceptFor(input, existing, resource.DeletionTimestampKey); err != nil { - return err + var err error + for i := 1; i <= maxAttempts; i++ { + if err = cas(); !errors.Is(err, storage.ErrCASFailure) { + break } - if err := ensureDataUnchanged(input, existing); err != nil { - return err - } - } - - // Block no-op writes writes to resource that already has a deletion timestamp. The - // only valid writes should be removal of finalizers. - if inputMarked && existingMarked { - deleteRelated = deleteRelated || true - // Check if a no-op - errMetadataSame := ensureMetadataSameExceptFor(input, existing, resource.DeletionTimestampKey) - errDataUnchanged := ensureDataUnchanged(input, existing) - if errMetadataSame == nil && errDataUnchanged == nil { - return status.Error(codes.InvalidArgument, "cannot no-op write resource marked for deletion") + if backoff.Wait(ctx) != nil { + break } + s.Logger.Trace("retrying failed CAS operation", "failure_count", i) } - - // Block writes that do more than removing finalizers if previously marked for deletion. - if inputMarked && existingMarked && resource.HasFinalizers(existing) { - deleteRelated = deleteRelated || true - if err := ensureMetadataSameExceptFor(input, existing, resource.FinalizerKey); err != nil { - return err - } - if err := ensureDataUnchanged(input, existing); err != nil { - return err - } - if err := EnsureFinalizerRemoved(input, existing); err != nil { - return err - } - } - - // Classify writes that just remove finalizer as deleteRelated regardless of deletion state. - if err := EnsureFinalizerRemoved(input, existing); err == nil { - if err := ensureDataUnchanged(input, existing); err == nil { - deleteRelated = deleteRelated || true - } - } - - // Lastly, block writes when the resource's tenancy unit has been marked for deletion and - // the write is not related a valid delete scenario. - if tenancyMarkedForDeletion && !deleteRelated { - return status.Errorf(codes.InvalidArgument, "cannot write resource when tenancy marked for deletion: %s", existing.Id.Tenancy) - } - - return nil + return err } -// preserveDeferredDeletionMetadata only applies to user writes (Version == "") which is a precondition. -func preserveDeferredDeletionMetadata(input, existing *pbresource.Resource) { - // preserve existing deletionTimestamp if not provided in input - if !resource.IsMarkedForDeletion(input) && resource.IsMarkedForDeletion(existing) { - if input.Metadata == nil { - input.Metadata = make(map[string]string) - } - input.Metadata[resource.DeletionTimestampKey] = existing.Metadata[resource.DeletionTimestampKey] +func validateWriteRequest(req *pbresource.WriteRequest) error { + var field string + switch { + case req.Resource == nil: + field = "resource" + case req.Resource.Id == nil: + field = "resource.id" + case req.Resource.Data == nil: + field = "resource.data" } - // Only preserve finalizers if the is key absent from input and present in existing. - // If the key is present in input, the user clearly wants to remove finalizers! 
- inputHasKey := false - if input.Metadata != nil { - _, inputHasKey = input.Metadata[resource.FinalizerKey] + if field != "" { + return status.Errorf(codes.InvalidArgument, "%s is required", field) } - existingHasKey := false - if existing.Metadata != nil { - _, existingHasKey = existing.Metadata[resource.FinalizerKey] + if err := validateId(req.Resource.Id, "resource.id"); err != nil { + return err } - if !inputHasKey && existingHasKey { - if input.Metadata == nil { - input.Metadata = make(map[string]string) + if req.Resource.Owner != nil { + if err := validateId(req.Resource.Owner, "resource.owner"); err != nil { + return err } - input.Metadata[resource.FinalizerKey] = existing.Metadata[resource.FinalizerKey] } + return nil } diff --git a/agent/grpc-external/services/resource/write_mav_common_test.go b/agent/grpc-external/services/resource/write_mav_common_test.go deleted file mode 100644 index 6b6a82b44e9f9..0000000000000 --- a/agent/grpc-external/services/resource/write_mav_common_test.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package resource_test - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/protobuf/types/known/anypb" - - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/proto-public/pbresource" - pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2" -) - -// Common test structs and test cases shared by the Write and MutateAndValidate RPCs -// only. These are not intended to be used by other tests. - -type resourceValidTestCase struct { - modFn func(artist, recordLabel *pbresource.Resource) *pbresource.Resource - errContains string -} - -func resourceValidTestCases(t *testing.T) map[string]resourceValidTestCase { - return map[string]resourceValidTestCase{ - "no resource": { - modFn: func(_, _ *pbresource.Resource) *pbresource.Resource { - return nil - }, - errContains: "resource is required", - }, - "no id": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id = nil - return artist - }, - errContains: "resource.id is required", - }, - "no type": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Type = nil - return artist - }, - errContains: "resource.id.type is required", - }, - "no name": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Name = "" - return artist - }, - errContains: "resource.id.name invalid", - }, - "name is mixed case": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Name = "MixedCaseNotAllowed" - return artist - }, - errContains: "resource.id.name invalid", - }, - "name too long": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Name = strings.Repeat("a", resource.MaxNameLength+1) - return artist - }, - errContains: "resource.id.name invalid", - }, - "wrong data type": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - var err error - artist.Data, err = anypb.New(&pbdemov2.Album{}) - require.NoError(t, err) - return artist - }, - errContains: "resource.data is of wrong type", - }, - "partition is mixed case": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = "Default" - return artist - }, - errContains: "resource.id.tenancy.partition invalid", - }, - "partition too 
long": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - return artist - }, - errContains: "resource.id.tenancy.partition invalid", - }, - "namespace is mixed case": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Namespace = "Default" - return artist - }, - errContains: "resource.id.tenancy.namespace invalid", - }, - "namespace too long": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - return artist - }, - errContains: "resource.id.tenancy.namespace invalid", - }, - "fail validation hook": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - buffer := &pbdemov2.Artist{} - require.NoError(t, artist.Data.UnmarshalTo(buffer)) - buffer.Name = "" // name cannot be empty - require.NoError(t, artist.Data.MarshalFrom(buffer)) - return artist - }, - errContains: "artist.name required", - }, - "partition scope with non-empty namespace": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy.Namespace = "bogus" - return recordLabel - }, - errContains: "cannot have a namespace", - }, - } -} - -type ownerValidTestCase struct { - modFn func(res *pbresource.Resource) - errorContains string -} - -func ownerValidationTestCases(t *testing.T) map[string]ownerValidTestCase { - return map[string]ownerValidTestCase{ - "no owner type": { - modFn: func(res *pbresource.Resource) { res.Owner.Type = nil }, - errorContains: "resource.owner.type is required", - }, - "no owner name": { - modFn: func(res *pbresource.Resource) { res.Owner.Name = "" }, - errorContains: "resource.owner.name invalid", - }, - "mixed case owner name": { - modFn: func(res *pbresource.Resource) { res.Owner.Name = strings.ToUpper(res.Owner.Name) }, - errorContains: "resource.owner.name invalid", - }, - "owner name too long": { - modFn: func(res *pbresource.Resource) { - res.Owner.Name = strings.Repeat("a", resource.MaxNameLength+1) - }, - errorContains: "resource.owner.name invalid", - }, - "owner partition is mixed case": { - modFn: func(res *pbresource.Resource) { - res.Owner.Tenancy.Partition = "Default" - }, - errorContains: "resource.owner.tenancy.partition invalid", - }, - "owner partition too long": { - modFn: func(res *pbresource.Resource) { - res.Owner.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - }, - errorContains: "resource.owner.tenancy.partition invalid", - }, - "owner namespace is mixed case": { - modFn: func(res *pbresource.Resource) { - res.Owner.Tenancy.Namespace = "Default" - }, - errorContains: "resource.owner.tenancy.namespace invalid", - }, - "owner namespace too long": { - modFn: func(res *pbresource.Resource) { - res.Owner.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - }, - errorContains: "resource.owner.tenancy.namespace invalid", - }, - } -} - -// Test case struct shared by MutateAndValidate and Write success test cases -type mavOrWriteSuccessTestCase struct { - modFn func(artist, recordLabel *pbresource.Resource) *pbresource.Resource - expectedTenancy *pbresource.Tenancy -} - -// Test case struct shared by MutateAndValidate and Write success test cases -func mavOrWriteSuccessTestCases(t *testing.T) map[string]mavOrWriteSuccessTestCase { - return map[string]mavOrWriteSuccessTestCase{ - "namespaced resource provides nonempty partition and namespace": { - modFn: 
func(artist, _ *pbresource.Resource) *pbresource.Resource { - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, - "namespaced resource inherits tokens partition when empty": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = "" - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, - "namespaced resource inherits tokens namespace when empty": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Namespace = "" - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, - "namespaced resource inherits tokens partition and namespace when empty": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = "" - artist.Id.Tenancy.Namespace = "" - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, - "namespaced resource inherits tokens partition and namespace when tenancy nil": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy = nil - return artist - }, - expectedTenancy: resource.DefaultNamespacedTenancy(), - }, - "partitioned resource provides nonempty partition": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - return recordLabel - }, - expectedTenancy: resource.DefaultPartitionedTenancy(), - }, - "partitioned resource inherits tokens partition when empty": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy.Partition = "" - return recordLabel - }, - expectedTenancy: resource.DefaultPartitionedTenancy(), - }, - "partitioned resource inherits tokens partition when tenancy nil": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy = nil - return recordLabel - }, - expectedTenancy: resource.DefaultPartitionedTenancy(), - }, - } -} - -// Test case struct shared by MutateAndValidate and Write test cases where tenancy is not found -type mavOrWriteTenancyNotFoundTestCase map[string]struct { - modFn func(artist, recordLabel *pbresource.Resource) *pbresource.Resource - errCode codes.Code - errContains string -} - -// Test case struct shared by MutateAndValidate and Write test cases where tenancy is not found -func mavOrWriteTenancyNotFoundTestCases(t *testing.T) mavOrWriteTenancyNotFoundTestCase { - return mavOrWriteTenancyNotFoundTestCase{ - "namespaced resource provides nonexistant partition": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Partition = "boguspartition" - return artist - }, - errCode: codes.InvalidArgument, - errContains: "partition not found", - }, - "namespaced resource provides nonexistant namespace": { - modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy.Namespace = "bogusnamespace" - return artist - }, - errCode: codes.InvalidArgument, - errContains: "namespace not found", - }, - "partitioned resource provides nonexistant partition": { - modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { - recordLabel.Id.Tenancy.Partition = "boguspartition" - return recordLabel - }, - errCode: codes.InvalidArgument, - errContains: "partition not found", - }, - } -} - -type mavOrWriteTenancyMarkedForDeletionTestCase struct { - modFn func(artist, recordLabel *pbresource.Resource, mockTenancyBridge *svc.MockTenancyBridge) *pbresource.Resource - errContains string -} - -func 
mavOrWriteTenancyMarkedForDeletionTestCases(t *testing.T) map[string]mavOrWriteTenancyMarkedForDeletionTestCase { - return map[string]mavOrWriteTenancyMarkedForDeletionTestCase{ - "namespaced resources partition marked for deletion": { - modFn: func(artist, _ *pbresource.Resource, mockTenancyBridge *svc.MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(true, nil) - return artist - }, - errContains: "tenancy marked for deletion", - }, - "namespaced resources namespace marked for deletion": { - modFn: func(artist, _ *pbresource.Resource, mockTenancyBridge *svc.MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(false, nil) - mockTenancyBridge.On("IsNamespaceMarkedForDeletion", "ap1", "ns1").Return(true, nil) - return artist - }, - errContains: "tenancy marked for deletion", - }, - "partitioned resources partition marked for deletion": { - modFn: func(_, recordLabel *pbresource.Resource, mockTenancyBridge *svc.MockTenancyBridge) *pbresource.Resource { - mockTenancyBridge.On("IsPartitionMarkedForDeletion", "ap1").Return(true, nil) - return recordLabel - }, - errContains: "tenancy marked for deletion", - }, - } -} diff --git a/agent/grpc-external/services/resource/write_status.go b/agent/grpc-external/services/resource/write_status.go index fc209cd912521..205918e1dc2b7 100644 --- a/agent/grpc-external/services/resource/write_status.go +++ b/agent/grpc-external/services/resource/write_status.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resource @@ -8,49 +8,25 @@ import ( "errors" "fmt" - "github.com/oklog/ulid/v2" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/oklog/ulid/v2" + "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" ) func (s *Server) WriteStatus(ctx context.Context, req *pbresource.WriteStatusRequest) (*pbresource.WriteStatusResponse, error) { - reg, err := s.validateWriteStatusRequest(req) - if err != nil { - return nil, err - } - - entMeta := v2TenancyToV1EntMeta(req.Id.Tenancy) - authz, authzContext, err := s.getAuthorizer(tokenFromContext(ctx), entMeta) + authz, err := s.getAuthorizer(tokenFromContext(ctx)) if err != nil { return nil, err } - // Apply defaults when tenancy units empty. - v1EntMetaToV2Tenancy(reg, entMeta, req.Id.Tenancy) - - // Check tenancy exists for the V2 resource. Ignore "marked for deletion" since status updates - // should still work regardless. - if err = tenancyExists(reg, s.TenancyBridge, req.Id.Tenancy, codes.InvalidArgument); err != nil { - return nil, err - } - - // Retrieve resource since ACL hook requires it. - existing, err := s.Backend.Read(ctx, storage.EventualConsistency, req.Id) - switch { - case errors.Is(err, storage.ErrNotFound): - return nil, status.Errorf(codes.NotFound, err.Error()) - case err != nil: - return nil, status.Errorf(codes.Internal, "failed read: %v", err) - } - - // Check write ACL. 
- err = reg.ACLs.Write(authz, authzContext, existing) + // check acls + err = authz.ToAllowAuthorizer().OperatorWriteAllowed(&acl.AuthorizerContext{}) switch { case acl.IsErrPermissionDenied(err): return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -58,6 +34,15 @@ func (s *Server) WriteStatus(ctx context.Context, req *pbresource.WriteStatusReq return nil, status.Errorf(codes.Internal, "failed operator:write allowed acl: %v", err) } + if err := validateWriteStatusRequest(req); err != nil { + return nil, err + } + + _, err = s.resolveType(req.Id.Type) + if err != nil { + return nil, err + } + // At the storage backend layer, all writes are CAS operations. // // See comment in write.go for more information. @@ -113,13 +98,15 @@ func (s *Server) WriteStatus(ctx context.Context, req *pbresource.WriteStatusReq return &pbresource.WriteStatusResponse{Resource: result}, nil } -func (s *Server) validateWriteStatusRequest(req *pbresource.WriteStatusRequest) (*resource.Registration, error) { +func validateWriteStatusRequest(req *pbresource.WriteStatusRequest) error { var field string switch { case req.Id == nil: field = "id" case req.Id.Type == nil: field = "id.type" + case req.Id.Tenancy == nil: + field = "id.tenancy" case req.Id.Name == "": field = "id.name" case req.Id.Uid == "": @@ -156,53 +143,16 @@ func (s *Server) validateWriteStatusRequest(req *pbresource.WriteStatusRequest) } } if field != "" { - return nil, status.Errorf(codes.InvalidArgument, "%s is required", field) + return status.Errorf(codes.InvalidArgument, "%s is required", field) } if req.Status.UpdatedAt != nil { - return nil, status.Error(codes.InvalidArgument, "status.updated_at is automatically set and cannot be provided") + return status.Error(codes.InvalidArgument, "status.updated_at is automatically set and cannot be provided") } if _, err := ulid.ParseStrict(req.Status.ObservedGeneration); err != nil { - return nil, status.Error(codes.InvalidArgument, "status.observed_generation is not valid") - } - - // Better UX: Allow callers to pass in nil tenancy. Defaulting and inheritance of tenancy - // from the request token will take place further down in the call flow. - if req.Id.Tenancy == nil { - req.Id.Tenancy = &pbresource.Tenancy{ - Partition: "", - Namespace: "", - } - } - - if err := validateId(req.Id, "id"); err != nil { - return nil, err - } - - for i, condition := range req.Status.Conditions { - if condition.Resource != nil { - if err := validateRef(condition.Resource, fmt.Sprintf("status.conditions[%d].resource", i)); err != nil { - return nil, err - } - } - } - - // Check type exists. - reg, err := s.resolveType(req.Id.Type) - if err != nil { - return nil, err - } - - // Check scope. - if reg.Scope == resource.ScopePartition && req.Id.Tenancy.Namespace != "" { - return nil, status.Errorf( - codes.InvalidArgument, - "partition scoped resource %s cannot have a namespace. got: %s", - resource.ToGVK(req.Id.Type), - req.Id.Tenancy.Namespace, - ) + return status.Error(codes.InvalidArgument, "status.observed_generation is not valid") } - return reg, nil + return nil } diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index 57431eac54e3b..aa26330176df7 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -1,11 +1,10 @@ // Copyright (c) HashiCorp, Inc. 
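[Illustrative sketch, not part of the patch] For reference while reading the write_status_test.go changes below, the restored validateWriteStatusRequest expects roughly the following request shape (res is a previously written resource, ref a resource reference; the validWriteStatusRequest helper used by the tests is not shown in this hunk, and the Condition message name is inferred from the field accesses in the validation code):

    req := &pbresource.WriteStatusRequest{
        Id:  res.Id, // type, tenancy, name, and uid must all be set
        Key: "consul.io/example-controller", // any non-empty status key
        Status: &pbresource.Status{
            ObservedGeneration: res.Generation, // must parse as a ULID
            Conditions: []*pbresource.Condition{{
                Type:     "accepted", // required
                Resource: ref,        // if present, type, tenancy, and name are required
            }},
            // UpdatedAt is set by the server and must be left unset
        },
    }
    _, err := client.WriteStatus(ctx, req)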
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "fmt" - "strings" "testing" "github.com/oklog/ulid/v2" @@ -16,15 +15,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/proto-public/pbresource" ) -// TODO: Update all tests to use true/false table test for v2tenancy - func TestWriteStatus_ACL(t *testing.T) { type testCase struct { authz resolver.Result @@ -32,7 +27,7 @@ func TestWriteStatus_ACL(t *testing.T) { } testcases := map[string]testCase{ "denied": { - authz: AuthorizerFrom(t, demo.ArtistV2ReadPolicy), + authz: AuthorizerFrom(t, demo.ArtistV2WritePolicy), assertErrFn: func(err error) { require.Error(t, err) require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) @@ -48,8 +43,14 @@ func TestWriteStatus_ACL(t *testing.T) { for desc, tc := range testcases { t.Run(desc, func(t *testing.T) { - builder := svctest.NewResourceServiceBuilder().WithRegisterFns(demo.RegisterTypes) - client := builder.Run(t) + server := testServer(t) + client := testClient(t, server) + + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(tc.authz, nil) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -58,12 +59,6 @@ func TestWriteStatus_ACL(t *testing.T) { require.NoError(t, err) artist = rsp.Resource - // Defer mocking out authz since above write is necessary to set up the test resource. - mockACLResolver := &svc.MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(tc.authz, nil) - builder.ServiceImpl().Config.ACLResolver = mockACLResolver - // exercise ACL _, err = client.WriteStatus(testContext(t), validWriteStatusRequest(t, artist)) tc.assertErrFn(err) @@ -72,186 +67,41 @@ func TestWriteStatus_ACL(t *testing.T) { } func TestWriteStatus_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) - - testCases := map[string]struct { - typ *pbresource.Type - modFn func(req *pbresource.WriteStatusRequest) - errContains string - }{ - "no id": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id = nil }, - errContains: "id is required", - }, - "no type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Type = nil }, - errContains: "id.type is required", - }, - "no name": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "" }, - errContains: "id.name is required", - }, - "no uid": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Uid = "" }, - errContains: "id.uid is required", - }, - "name mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "U2" }, - errContains: "id.name invalid", - }, - "name too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Name = strings.Repeat("a", resource.MaxNameLength+1) - }, - errContains: "id.name invalid", - }, - "partition mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "Default" }, - errContains: "id.tenancy.partition invalid", - }, - "partition too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - }, - errContains: "id.tenancy.partition invalid", - }, - "namespace mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "Default" }, - errContains: "id.tenancy.namespace invalid", - }, - "namespace too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - }, - errContains: "id.tenancy.namespace invalid", - }, - "no key": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Key = "" }, - errContains: "key is required", - }, - "no status": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status = nil }, - errContains: "status is required", - }, - "no observed generation": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "" }, - errContains: "status.observed_generation is required", - }, - "bad observed generation": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "bogus" }, - errContains: "status.observed_generation is not valid", - }, - "no condition type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Type = "" }, - errContains: "status.conditions[0].type is required", - }, - "no reference type": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Type = nil }, - errContains: "status.conditions[0].resource.type is required", - }, - "no reference tenancy": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Tenancy = nil }, - errContains: "status.conditions[0].resource.tenancy is required", - }, - "no reference name": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "" }, - errContains: 
"status.conditions[0].resource.name is required", - }, - "reference name mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "U2" }, - errContains: "status.conditions[0].resource.name invalid", - }, - "reference name too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Status.Conditions[0].Resource.Name = strings.Repeat("r", resource.MaxNameLength+1) - }, - errContains: "status.conditions[0].resource.name invalid", - }, - "reference partition mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Status.Conditions[0].Resource.Tenancy.Partition = "Default" - }, - errContains: "status.conditions[0].resource.tenancy.partition invalid", - }, - "reference partition too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Status.Conditions[0].Resource.Tenancy.Partition = strings.Repeat("p", resource.MaxNameLength+1) - }, - errContains: "status.conditions[0].resource.tenancy.partition invalid", - }, - "reference namespace mixed case": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Status.Conditions[0].Resource.Tenancy.Namespace = "Default" - }, - errContains: "status.conditions[0].resource.tenancy.namespace invalid", - }, - "reference namespace too long": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Status.Conditions[0].Resource.Tenancy.Namespace = strings.Repeat("n", resource.MaxNameLength+1) - }, - errContains: "status.conditions[0].resource.tenancy.namespace invalid", - }, - "updated at provided": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Status.UpdatedAt = timestamppb.Now() }, - errContains: "status.updated_at is automatically set and cannot be provided", - }, - "partition scoped type provides namespace in tenancy": { - typ: demo.TypeV1RecordLabel, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" }, - errContains: "cannot have a namespace", - }, + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) + + testCases := map[string]func(*pbresource.WriteStatusRequest){ + "no id": func(req *pbresource.WriteStatusRequest) { req.Id = nil }, + "no type": func(req *pbresource.WriteStatusRequest) { req.Id.Type = nil }, + "no tenancy": func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy = nil }, + "no name": func(req *pbresource.WriteStatusRequest) { req.Id.Name = "" }, + "no uid": func(req *pbresource.WriteStatusRequest) { req.Id.Uid = "" }, + "no key": func(req *pbresource.WriteStatusRequest) { req.Key = "" }, + "no status": func(req *pbresource.WriteStatusRequest) { req.Status = nil }, + "no observed generation": func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "" }, + "bad observed generation": func(req *pbresource.WriteStatusRequest) { req.Status.ObservedGeneration = "bogus" }, + "no condition type": func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Type = "" }, + "no reference type": func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Type = nil }, + "no reference tenancy": func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Tenancy = nil }, + "no reference name": func(req *pbresource.WriteStatusRequest) { req.Status.Conditions[0].Resource.Name = "" }, + "updated at provided": func(req 
*pbresource.WriteStatusRequest) { req.Status.UpdatedAt = timestamppb.Now() }, } - for desc, tc := range testCases { + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { - var res *pbresource.Resource - var err error - switch { - case resource.EqualType(demo.TypeV2Artist, tc.typ): - res, err = demo.GenerateV2Artist() - case resource.EqualType(demo.TypeV1RecordLabel, tc.typ): - res, err = demo.GenerateV1RecordLabel("looney-tunes") - default: - t.Fatal("unsupported type", tc.typ) - } + res, err := demo.GenerateV2Artist() require.NoError(t, err) res.Id.Uid = ulid.Make().String() res.Generation = ulid.Make().String() req := validWriteStatusRequest(t, res) - tc.modFn(req) + modFn(req) _, err = client.WriteStatus(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } @@ -262,9 +112,10 @@ func TestWriteStatus_Success(t *testing.T) { "Non CAS": func(req *pbresource.WriteStatusRequest) { req.Version = "" }, } { t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -296,145 +147,11 @@ func TestWriteStatus_Success(t *testing.T) { } } -func TestWriteStatus_Tenancy_Defaults(t *testing.T) { - for desc, tc := range map[string]struct { - scope resource.Scope - modFn func(req *pbresource.WriteStatusRequest) - }{ - "namespaced resource provides nonempty partition and namespace": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) {}, - }, - "namespaced resource inherits tokens partition when empty": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "" }, - }, - "namespaced resource inherits tokens namespace when empty": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "" }, - }, - "namespaced resource inherits tokens partition and namespace when empty": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { - req.Id.Tenancy.Partition = "" - req.Id.Tenancy.Namespace = "" - }, - }, - "namespaced resource inherits tokens partition and namespace when tenancy nil": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy = nil }, - }, - "partitioned resource provides nonempty partition": { - scope: resource.ScopePartition, - modFn: func(req *pbresource.WriteStatusRequest) {}, - }, - "partitioned resource inherits tokens partition when empty": { - scope: resource.ScopePartition, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "" }, - }, - } { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Pick resource based on scope of type in testcase. - var res *pbresource.Resource - var err error - switch tc.scope { - case resource.ScopeNamespace: - res, err = demo.GenerateV2Artist() - case resource.ScopePartition: - res, err = demo.GenerateV1RecordLabel("looney-tunes") - } - require.NoError(t, err) - - // Write resource so we can update status later. 
- writeRsp, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) - require.NoError(t, err) - res = writeRsp.Resource - require.Nil(t, res.Status) - - // Write status with tenancy modded by testcase. - req := validWriteStatusRequest(t, res) - tc.modFn(req) - rsp, err := client.WriteStatus(testContext(t), req) - require.NoError(t, err) - res = rsp.Resource - - // Re-read resource and verify status successfully written (not nil) - _, err = client.Read(testContext(t), &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - res = rsp.Resource - require.NotNil(t, res.Status) - }) - } -} - -func TestWriteStatus_Tenancy_NotFound(t *testing.T) { - for desc, tc := range map[string]struct { - scope resource.Scope - modFn func(req *pbresource.WriteStatusRequest) - errCode codes.Code - errContains string - }{ - "namespaced resource provides nonexistant partition": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" }, - errCode: codes.InvalidArgument, - errContains: "partition", - }, - "namespaced resource provides nonexistant namespace": { - scope: resource.ScopeNamespace, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" }, - errCode: codes.InvalidArgument, - errContains: "namespace", - }, - "partitioned resource provides nonexistant partition": { - scope: resource.ScopePartition, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" }, - errCode: codes.InvalidArgument, - errContains: "partition", - }, - } { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Pick resource based on scope of type in testcase. - var res *pbresource.Resource - var err error - switch tc.scope { - case resource.ScopeNamespace: - res, err = demo.GenerateV2Artist() - case resource.ScopePartition: - res, err = demo.GenerateV1RecordLabel("looney-tunes") - } - require.NoError(t, err) - - // Fill in required fields so validation continues until tenancy is checked - req := validWriteStatusRequest(t, res) - req.Id.Uid = ulid.Make().String() - req.Status.ObservedGeneration = ulid.Make().String() - - // Write status with tenancy modded by testcase. - tc.modFn(req) - _, err = client.WriteStatus(testContext(t), req) - - // Verify non-existant tenancy field is the cause of the error. - require.Error(t, err) - require.Equal(t, tc.errCode.String(), status.Code(err).String()) - require.Contains(t, err.Error(), tc.errContains) - }) - } -} - func TestWriteStatus_CASFailure(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -452,7 +169,8 @@ func TestWriteStatus_CASFailure(t *testing.T) { } func TestWriteStatus_TypeNotFound(t *testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + server := testServer(t) + client := testClient(t, server) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -466,9 +184,9 @@ func TestWriteStatus_TypeNotFound(t *testing.T) { } func TestWriteStatus_ResourceNotFound(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) + server := testServer(t) + client := testClient(t, server) + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -481,9 +199,9 @@ func TestWriteStatus_ResourceNotFound(t *testing.T) { } func TestWriteStatus_WrongUid(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -518,8 +236,8 @@ func TestWriteStatus_NonCASUpdate_Retry(t *testing.T) { backend := &blockOnceBackend{ Backend: server.Backend, - readCompletedCh: make(chan struct{}), - blockCh: make(chan struct{}), + readCh: make(chan struct{}), + blockCh: make(chan struct{}), } server.Backend = backend @@ -534,7 +252,7 @@ func TestWriteStatus_NonCASUpdate_Retry(t *testing.T) { // Wait for the read, to ensure the Write in the goroutine above has read the // current version of the resource. - <-backend.readCompletedCh + <-backend.readCh // Update the resource. _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: modifyArtist(t, res)}) @@ -550,49 +268,24 @@ func TestWriteStatus_NonCASUpdate_Retry(t *testing.T) { func validWriteStatusRequest(t *testing.T, res *pbresource.Resource) *pbresource.WriteStatusRequest { t.Helper() - switch { - case resource.EqualType(res.Id.Type, demo.TypeV2Artist): - album, err := demo.GenerateV2Album(res.Id) - require.NoError(t, err) - return &pbresource.WriteStatusRequest{ - Id: res.Id, - Version: res.Version, - Key: "consul.io/artist-controller", - Status: &pbresource.Status{ - ObservedGeneration: res.Generation, - Conditions: []*pbresource.Condition{ - { - Type: "AlbumCreated", - State: pbresource.Condition_STATE_TRUE, - Reason: "AlbumCreated", - Message: fmt.Sprintf("Album '%s' created", album.Id.Name), - Resource: resource.Reference(album.Id, ""), - }, - }, - }, - } - case resource.EqualType(res.Id.Type, demo.TypeV1RecordLabel): - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - return &pbresource.WriteStatusRequest{ - Id: res.Id, - Version: res.Version, - Key: "consul.io/recordlabel-controller", - Status: &pbresource.Status{ - ObservedGeneration: res.Generation, - Conditions: []*pbresource.Condition{ - { - Type: "ArtistCreated", - State: pbresource.Condition_STATE_TRUE, - Reason: "ArtistCreated", - Message: fmt.Sprintf("Artist '%s' created", artist.Id.Name), - Resource: resource.Reference(artist.Id, ""), - }, + album, err := demo.GenerateV2Album(res.Id) + require.NoError(t, err) + + return &pbresource.WriteStatusRequest{ + Id: res.Id, + Version: res.Version, + Key: "consul.io/artist-controller", + Status: &pbresource.Status{ + ObservedGeneration: res.Generation, + Conditions: []*pbresource.Condition{ + { + Type: "AlbumCreated", + State: pbresource.Condition_STATE_TRUE, + Reason: "AlbumCreated", + Message: fmt.Sprintf("Album '%s' created", album.Id.Name), + Resource: resource.Reference(album.Id, ""), }, }, - } - default: - t.Fatal("unsupported type", res.Id.Type) + }, } - return nil } diff --git a/agent/grpc-external/services/resource/write_test.go b/agent/grpc-external/services/resource/write_test.go index beb47b6f22e4e..4ec25ee26c0c7 100644 --- a/agent/grpc-external/services/resource/write_test.go +++ b/agent/grpc-external/services/resource/write_test.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package resource_test +package resource import ( "context" + "sync/atomic" "testing" - "time" "github.com/oklog/ulid/v2" "github.com/stretchr/testify/mock" @@ -16,56 +16,112 @@ import ( "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/acl/resolver" - svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" - pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" pbdemov1 "github.com/hashicorp/consul/proto/private/pbdemo/v1" pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2" - "github.com/hashicorp/consul/proto/private/prototest" ) -// TODO: Update all tests to use true/false table test for v2tenancy - func TestWrite_InputValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) - for desc, tc := range resourceValidTestCases(t) { + testCases := map[string]func(*pbresource.WriteRequest){ + "no resource": func(req *pbresource.WriteRequest) { req.Resource = nil }, + "no id": func(req *pbresource.WriteRequest) { req.Resource.Id = nil }, + "no type": func(req *pbresource.WriteRequest) { req.Resource.Id.Type = nil }, + "no tenancy": func(req *pbresource.WriteRequest) { req.Resource.Id.Tenancy = nil }, + "no name": func(req *pbresource.WriteRequest) { req.Resource.Id.Name = "" }, + "no data": func(req *pbresource.WriteRequest) { req.Resource.Data = nil }, + // clone necessary to not pollute DefaultTenancy + "tenancy partition not default": func(req *pbresource.WriteRequest) { + req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy) + req.Resource.Id.Tenancy.Partition = "" + }, + "tenancy namespace not default": func(req *pbresource.WriteRequest) { + req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy) + req.Resource.Id.Tenancy.Namespace = "" + }, + "tenancy peername not local": func(req *pbresource.WriteRequest) { + req.Resource.Id.Tenancy = clone(req.Resource.Id.Tenancy) + req.Resource.Id.Tenancy.PeerName = "" + }, + "wrong data type": func(req *pbresource.WriteRequest) { + var err error + req.Resource.Data, err = anypb.New(&pbdemov2.Album{}) + require.NoError(t, err) + }, + "fail validation hook": func(req *pbresource.WriteRequest) { + artist := &pbdemov2.Artist{} + require.NoError(t, req.Resource.Data.UnmarshalTo(artist)) + artist.Name = "" // name cannot be empty + require.NoError(t, req.Resource.Data.MarshalFrom(artist)) + }, + } + for desc, modFn := range testCases { t.Run(desc, func(t *testing.T) { - artist, err := demo.GenerateV2Artist() + res, err := demo.GenerateV2Artist() require.NoError(t, err) - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) + req := &pbresource.WriteRequest{Resource: res} + modFn(req) - req := &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)} _, err = client.Write(testContext(t), req) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) }) } } func 
TestWrite_OwnerValidation(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) - testCases := ownerValidationTestCases(t) + demo.RegisterTypes(server.Registry) - // This is not part of ownerValidationTestCases because it is a special case - // that only gets caught deeper into the write path. - testCases["no owner tenancy"] = ownerValidTestCase{ - modFn: func(res *pbresource.Resource) { res.Owner.Tenancy = nil }, - errorContains: "resource.owner does not exist", + type testCase struct { + modReqFn func(req *pbresource.WriteRequest) + errorContains string + } + testCases := map[string]testCase{ + "no owner type": { + modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Type = nil }, + errorContains: "resource.owner.type", + }, + "no owner tenancy": { + modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Tenancy = nil }, + errorContains: "resource.owner.tenancy", + }, + "no owner name": { + modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Name = "" }, + errorContains: "resource.owner.name", + }, + // clone necessary to not pollute DefaultTenancy + "owner tenancy partition not default": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy) + req.Resource.Owner.Tenancy.Partition = "" + }, + errorContains: "resource.owner.tenancy.partition", + }, + "owner tenancy namespace not default": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy) + req.Resource.Owner.Tenancy.Namespace = "" + }, + errorContains: "resource.owner.tenancy.namespace", + }, + "owner tenancy peername not local": { + modReqFn: func(req *pbresource.WriteRequest) { + req.Resource.Owner.Tenancy = clone(req.Resource.Owner.Tenancy) + req.Resource.Owner.Tenancy.PeerName = "" + }, + errorContains: "resource.owner.tenancy.peername", + }, } - for desc, tc := range testCases { t.Run(desc, func(t *testing.T) { artist, err := demo.GenerateV2Artist() @@ -74,9 +130,10 @@ func TestWrite_OwnerValidation(t *testing.T) { album, err := demo.GenerateV2Album(artist.Id) require.NoError(t, err) - tc.modFn(album) + albumReq := &pbresource.WriteRequest{Resource: album} + tc.modReqFn(albumReq) - _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: album}) + _, err = client.Write(testContext(t), albumReq) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) require.ErrorContains(t, err, tc.errorContains) @@ -85,7 +142,8 @@ func TestWrite_OwnerValidation(t *testing.T) { } func TestWrite_TypeNotFound(t *testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + server := testServer(t) + client := testClient(t, server) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -119,14 +177,14 @@ func TestWrite_ACLs(t *testing.T) { for desc, tc := range testcases { t.Run(desc, func(t *testing.T) { - mockACLResolver := &svc.MockACLResolver{} + server := testServer(t) + client := testClient(t, server) + + mockACLResolver := &MockACLResolver{} mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). Return(tc.authz, nil) - - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - WithACLResolver(mockACLResolver). 
- Run(t) + server.ACLResolver = mockACLResolver + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -139,9 +197,9 @@ func TestWrite_ACLs(t *testing.T) { } func TestWrite_Mutate(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -163,102 +221,27 @@ func TestWrite_Mutate(t *testing.T) { require.Equal(t, pbdemov2.Genre_GENRE_DISCO, artistData.Genre) } -func TestWrite_Create_Success(t *testing.T) { - for desc, tc := range mavOrWriteSuccessTestCases(t) { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - rsp, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)}) - require.NoError(t, err) - require.NotEmpty(t, rsp.Resource.Version, "resource should have version") - require.NotEmpty(t, rsp.Resource.Id.Uid, "resource id should have uid") - require.NotEmpty(t, rsp.Resource.Generation, "resource should have generation") - prototest.AssertDeepEqual(t, tc.expectedTenancy, rsp.Resource.Id.Tenancy) - }) - } -} - -func TestWrite_Create_Tenancy_NotFound(t *testing.T) { - for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - - _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), tc.errContains) - }) - } -} - -func TestWrite_Create_With_DeletionTimestamp_Fails(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - res := rtest.Resource(demo.TypeV1Artist, "blur"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemov1.Artist{Name: "Blur"}). - WithMeta(resource.DeletionTimestampKey, time.Now().Format(time.RFC3339)). 
- Build() - - _, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), resource.DeletionTimestampKey) -} - -func TestWrite_Create_With_TenancyMarkedForDeletion_Fails(t *testing.T) { - for desc, tc := range mavOrWriteTenancyMarkedForDeletionTestCases(t) { - t.Run(desc, func(t *testing.T) { - server := testServer(t) - client := testClient(t, server) - demo.RegisterTypes(server.Registry) - - recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") - require.NoError(t, err) - recordLabel.Id.Tenancy.Partition = "ap1" +func TestWrite_ResourceCreation_Success(t *testing.T) { + server := testServer(t) + client := testClient(t, server) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - artist.Id.Tenancy.Partition = "ap1" - artist.Id.Tenancy.Namespace = "ns1" + demo.RegisterTypes(server.Registry) - mockTenancyBridge := &svc.MockTenancyBridge{} - mockTenancyBridge.On("PartitionExists", "ap1").Return(true, nil) - mockTenancyBridge.On("NamespaceExists", "ap1", "ns1").Return(true, nil) - server.TenancyBridge = mockTenancyBridge + res, err := demo.GenerateV2Artist() + require.NoError(t, err) - _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel, mockTenancyBridge)}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), tc.errContains) - }) - } + rsp, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) + require.NoError(t, err) + require.NotEmpty(t, rsp.Resource.Version, "resource should have version") + require.NotEmpty(t, rsp.Resource.Id.Uid, "resource id should have uid") + require.NotEmpty(t, rsp.Resource.Generation, "resource should have generation") } func TestWrite_CASUpdate_Success(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -277,9 +260,10 @@ func TestWrite_CASUpdate_Success(t *testing.T) { } func TestWrite_ResourceCreation_StatusProvided(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -295,9 +279,10 @@ func TestWrite_ResourceCreation_StatusProvided(t *testing.T) { } func TestWrite_CASUpdate_Failure(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -315,9 +300,10 @@ func TestWrite_CASUpdate_Failure(t *testing.T) { } func TestWrite_Update_WrongUid(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -335,9 +321,10 @@ func TestWrite_Update_WrongUid(t *testing.T) { } func TestWrite_Update_StatusModified(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). 
- WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -349,7 +336,7 @@ func TestWrite_Update_StatusModified(t *testing.T) { require.NoError(t, err) res = statusRsp.Resource - // Passing the status unmodified should be fine. + // Passing the staus unmodified should be fine. rsp2, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) require.NoError(t, err) @@ -364,9 +351,10 @@ func TestWrite_Update_StatusModified(t *testing.T) { } func TestWrite_Update_NilStatus(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -387,9 +375,10 @@ func TestWrite_Update_NilStatus(t *testing.T) { } func TestWrite_Update_NoUid(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -405,9 +394,10 @@ func TestWrite_Update_NoUid(t *testing.T) { } func TestWrite_Update_GroupVersion(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -434,9 +424,10 @@ func TestWrite_Update_GroupVersion(t *testing.T) { } func TestWrite_NonCASUpdate_Success(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -456,6 +447,7 @@ func TestWrite_NonCASUpdate_Success(t *testing.T) { func TestWrite_NonCASUpdate_Retry(t *testing.T) { server := testServer(t) client := testClient(t, server) + demo.RegisterTypes(server.Registry) res, err := demo.GenerateV2Artist() @@ -469,8 +461,8 @@ func TestWrite_NonCASUpdate_Retry(t *testing.T) { backend := &blockOnceBackend{ Backend: server.Backend, - readCompletedCh: make(chan struct{}), - blockCh: make(chan struct{}), + readCh: make(chan struct{}), + blockCh: make(chan struct{}), } server.Backend = backend @@ -485,7 +477,7 @@ func TestWrite_NonCASUpdate_Retry(t *testing.T) { // Wait for the read, to ensure the Write in the goroutine above has read the // current version of the resource. - <-backend.readCompletedCh + <-backend.readCh // Update the resource. res = modifyArtist(t, rsp1.Resource) @@ -499,27 +491,14 @@ func TestWrite_NonCASUpdate_Retry(t *testing.T) { require.NoError(t, <-errCh) } -func TestWrite_NoData(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - res, err := demo.GenerateV1Concept("jazz") - require.NoError(t, err) - - rsp, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) - require.NoError(t, err) - require.NotEmpty(t, rsp.Resource.Version) - require.Equal(t, rsp.Resource.Id.Name, "jazz") -} - func TestWrite_Owner_Immutable(t *testing.T) { // Use of proto.Equal(..) 
in implementation covers all permutations // (nil -> non-nil, non-nil -> nil, owner1 -> owner2) so only the first one // is tested. - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) @@ -544,9 +523,10 @@ func TestWrite_Owner_Immutable(t *testing.T) { } func TestWrite_Owner_Uid(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + server := testServer(t) + client := testClient(t, server) + + demo.RegisterTypes(server.Registry) t.Run("uid given", func(t *testing.T) { artist, err := demo.GenerateV2Artist() @@ -612,317 +592,23 @@ func TestWrite_Owner_Uid(t *testing.T) { }) } -func TestEnsureFinalizerRemoved(t *testing.T) { - type testCase struct { - mod func(input, existing *pbresource.Resource) - errContains string - } - - testCases := map[string]testCase{ - "one finalizer removed from input": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(existing, "f1") - resource.AddFinalizer(existing, "f2") - resource.AddFinalizer(input, "f1") - }, - }, - "all finalizers removed from input": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(existing, "f1") - resource.AddFinalizer(existing, "f2") - resource.AddFinalizer(input, "f1") - resource.RemoveFinalizer(input, "f1") - }, - }, - "all finalizers removed from input and no finalizer key": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(existing, "f1") - resource.AddFinalizer(existing, "f2") - }, - }, - "no finalizers removed from input": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(existing, "f1") - resource.AddFinalizer(input, "f1") - }, - errContains: "expected at least one finalizer to be removed", - }, - "input finalizers not proper subset of existing": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(existing, "f1") - resource.AddFinalizer(existing, "f2") - resource.AddFinalizer(input, "f3") - }, - errContains: "expected at least one finalizer to be removed", - }, - "existing has no finalizers for input to remove": { - mod: func(input, existing *pbresource.Resource) { - resource.AddFinalizer(input, "f3") - }, - errContains: "expected at least one finalizer to be removed", - }, - } +type blockOnceBackend struct { + storage.Backend - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - input := rtest.Resource(demo.TypeV1Artist, "artist1"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemov1.Artist{Name: "artist1"}). - WithMeta(resource.DeletionTimestampKey, "someTimestamp"). - Build() - - existing := rtest.Resource(demo.TypeV1Artist, "artist1"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemov1.Artist{Name: "artist1"}). - WithMeta(resource.DeletionTimestampKey, "someTimestamp"). 
- Build() - - tc.mod(input, existing) - - err := svc.EnsureFinalizerRemoved(input, existing) - if tc.errContains != "" { - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) - } else { - require.NoError(t, err) - } - }) - } + done uint32 + readCh chan struct{} + blockCh chan struct{} } -func TestWrite_ResourceFrozenAfterMarkedForDeletion(t *testing.T) { - type testCase struct { - modFn func(res *pbresource.Resource) - errContains string - } - testCases := map[string]testCase{ - "no-op write rejected": { - modFn: func(res *pbresource.Resource) {}, - errContains: "cannot no-op write resource marked for deletion", - }, - "remove one finalizer": { - modFn: func(res *pbresource.Resource) { - resource.RemoveFinalizer(res, "finalizer1") - }, - }, - "remove all finalizers": { - modFn: func(res *pbresource.Resource) { - resource.RemoveFinalizer(res, "finalizer1") - resource.RemoveFinalizer(res, "finalizer2") - }, - }, - "adding finalizer fails": { - modFn: func(res *pbresource.Resource) { - resource.AddFinalizer(res, "finalizer3") - }, - errContains: "expected at least one finalizer to be removed", - }, - "remove deletionTimestamp fails": { - modFn: func(res *pbresource.Resource) { - delete(res.Metadata, resource.DeletionTimestampKey) - }, - errContains: "cannot remove deletionTimestamp", - }, - "modify deletionTimestamp fails": { - modFn: func(res *pbresource.Resource) { - res.Metadata[resource.DeletionTimestampKey] = "bad" - }, - errContains: "cannot modify deletionTimestamp", - }, - "modify data fails": { - modFn: func(res *pbresource.Resource) { - var err error - res.Data, err = anypb.New(&pbdemo.Artist{Name: "New Order"}) - require.NoError(t, err) - }, - errContains: "cannot modify data", - }, - } - - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a resource with finalizers - res := rtest.Resource(demo.TypeV1Artist, "joydivision"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemo.Artist{Name: "Joy Division"}). - WithMeta(resource.FinalizerKey, "finalizer1 finalizer2"). 
- Write(t, client) - - // Mark for deletion - resource should now be frozen - _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) - - // Verify marked for deletion - rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - require.True(t, resource.IsMarkedForDeletion(rsp.Resource)) +func (b *blockOnceBackend) Read(ctx context.Context, consistency storage.ReadConsistency, id *pbresource.ID) (*pbresource.Resource, error) { + res, err := b.Backend.Read(ctx, consistency, id) - // Apply test case mods - tc.modFn(rsp.Resource) - - // Verify write results - _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: rsp.Resource}) - if tc.errContains == "" { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, tc.errContains) - } - }) - } -} - -func TestWrite_NonCASWritePreservesFinalizers(t *testing.T) { - type testCase struct { - existingMeta map[string]string - inputMeta map[string]string - expectedMeta map[string]string - } - testCases := map[string]testCase{ - "input nil metadata preserves existing finalizers": { - inputMeta: nil, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - }, - "input metadata and no finalizer key preserves existing finalizers": { - inputMeta: map[string]string{}, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - }, - "input metadata and with empty finalizer key overwrites existing finalizers": { - inputMeta: map[string]string{resource.FinalizerKey: ""}, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: ""}, - }, - "input metadata with one finalizer key overwrites multiple existing finalizers": { - inputMeta: map[string]string{resource.FinalizerKey: "finalizer2"}, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: "finalizer2"}, - }, + // Block for exactly one call to Read. All subsequent calls (including those + // concurrent to the blocked call) will return immediately. + if atomic.CompareAndSwapUint32(&b.done, 0, 1) { + close(b.readCh) + <-b.blockCh } - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create the resource based on tc.existingMetadata - builder := rtest.Resource(demo.TypeV1Artist, "joydivision"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemo.Artist{Name: "Joy"}) - - if tc.existingMeta != nil { - for k, v := range tc.existingMeta { - builder.WithMeta(k, v) - } - } - res := builder.Write(t, client) - - // Build resource for user write based on tc.inputMetadata - builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name). - WithTenancy(resource.DefaultNamespacedTenancy()). 
- WithData(t, &pbdemo.Artist{Name: "Joy Division"}) - - if tc.inputMeta != nil { - for k, v := range tc.inputMeta { - builder.WithMeta(k, v) - } - } - userRes := builder.Build() - - // Perform the user write - rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes}) - require.NoError(t, err) - - // Verify write result preserved metadata based on testcase.expecteMetadata - for k := range tc.expectedMeta { - require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k]) - } - require.Equal(t, len(tc.expectedMeta), len(rsp.Resource.Metadata)) - }) - } -} - -func TestWrite_NonCASWritePreservesDeletionTimestamp(t *testing.T) { - type testCase struct { - existingMeta map[string]string - inputMeta map[string]string - expectedMeta map[string]string - } - - // deletionTimestamp has to be generated via Delete() call and can't be embedded in testdata - // even though testcase desc refers to it. - testCases := map[string]testCase{ - "input metadata no deletion timestamp preserves existing deletion timestamp and removes single finalizer": { - inputMeta: map[string]string{resource.FinalizerKey: "finalizer1"}, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1"}, - }, - "input metadata no deletion timestamp preserves existing deletion timestamp and removes all finalizers": { - inputMeta: map[string]string{resource.FinalizerKey: ""}, - existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, - expectedMeta: map[string]string{resource.FinalizerKey: ""}, - }, - } - - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithV2Tenancy(true). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create the resource based on tc.existingMetadata - builder := rtest.Resource(demo.TypeV1Artist, "joydivision"). - WithTenancy(resource.DefaultNamespacedTenancy()). - WithData(t, &pbdemo.Artist{Name: "Joy Division"}) - - if tc.existingMeta != nil { - for k, v := range tc.existingMeta { - builder.WithMeta(k, v) - } - } - res := builder.Write(t, client) - - // Mark for deletion - _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) - - // Re-read the deleted res for future comparison of deletionTimestamp - delRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - - // Build resource for user write based on tc.inputMetadata - builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name). - WithTenancy(resource.DefaultNamespacedTenancy()). 
- WithData(t, &pbdemo.Artist{Name: "Joy Division"}) - - if tc.inputMeta != nil { - for k, v := range tc.inputMeta { - builder.WithMeta(k, v) - } - } - userRes := builder.Build() - - // Perform the non-CAS user write - rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes}) - require.NoError(t, err) - - // Verify write result preserved metadata based on testcase.expectedMetadata - for k := range tc.expectedMeta { - require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k]) - } - // Verify deletion timestamp preserved even though it wasn't passed in to the write - require.Equal(t, delRsp.Resource.Metadata[resource.DeletionTimestampKey], rsp.Resource.Metadata[resource.DeletionTimestampKey]) - }) - } + return res, err } diff --git a/agent/grpc-external/services/serverdiscovery/server.go b/agent/grpc-external/services/serverdiscovery/server.go index 805b95c3780d8..477617122120c 100644 --- a/agent/grpc-external/services/serverdiscovery/server.go +++ b/agent/grpc-external/services/serverdiscovery/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package serverdiscovery @@ -37,6 +37,6 @@ func NewServer(cfg Config) *Server { return &Server{cfg} } -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbserverdiscovery.RegisterServerDiscoveryServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbserverdiscovery.RegisterServerDiscoveryServiceServer(grpcServer, s) } diff --git a/agent/grpc-external/services/serverdiscovery/server_test.go b/agent/grpc-external/services/serverdiscovery/server_test.go index a9fd65b7cbdce..cac32bd31ee30 100644 --- a/agent/grpc-external/services/serverdiscovery/server_test.go +++ b/agent/grpc-external/services/serverdiscovery/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package serverdiscovery diff --git a/agent/grpc-external/services/serverdiscovery/watch_servers.go b/agent/grpc-external/services/serverdiscovery/watch_servers.go index 94ed7ac58aef6..31a2cb92c8370 100644 --- a/agent/grpc-external/services/serverdiscovery/watch_servers.go +++ b/agent/grpc-external/services/serverdiscovery/watch_servers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package serverdiscovery diff --git a/agent/grpc-external/services/serverdiscovery/watch_servers_test.go b/agent/grpc-external/services/serverdiscovery/watch_servers_test.go index 0df48f3bb35c4..d58d0be407c3f 100644 --- a/agent/grpc-external/services/serverdiscovery/watch_servers_test.go +++ b/agent/grpc-external/services/serverdiscovery/watch_servers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package serverdiscovery diff --git a/agent/grpc-external/stats_test.go b/agent/grpc-external/stats_test.go index 7f59871db1fbc..3328001f332e8 100644 --- a/agent/grpc-external/stats_test.go +++ b/agent/grpc-external/stats_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external @@ -28,7 +28,7 @@ import ( func TestServer_EmitsStats(t *testing.T) { sink, metricsObj := testutil.NewFakeSink(t) - srv := NewServer(hclog.Default(), metricsObj, nil, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil) + srv := NewServer(hclog.Default(), metricsObj, nil, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}) testservice.RegisterSimpleServer(srv, &testservice.Simple{}) diff --git a/agent/grpc-external/testutils/acl.go b/agent/grpc-external/testutils/acl.go index ea4294e657d71..440a837682817 100644 --- a/agent/grpc-external/testutils/acl.go +++ b/agent/grpc-external/testutils/acl.go @@ -1,9 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testutils import ( + "testing" + "github.com/stretchr/testify/require" "github.com/hashicorp/go-uuid" @@ -11,10 +13,9 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/sdk/testutil" ) -func ACLAnonymous(t testutil.TestingTB) resolver.Result { +func ACLAnonymous(t *testing.T) resolver.Result { t.Helper() return resolver.Result{ @@ -25,7 +26,7 @@ func ACLAnonymous(t testutil.TestingTB) resolver.Result { } } -func ACLsDisabled(t testutil.TestingTB) resolver.Result { +func ACLsDisabled(t *testing.T) resolver.Result { t.Helper() return resolver.Result{ @@ -33,7 +34,7 @@ func ACLsDisabled(t testutil.TestingTB) resolver.Result { } } -func ACLNoPermissions(t testutil.TestingTB) resolver.Result { +func ACLNoPermissions(t *testing.T) resolver.Result { t.Helper() return resolver.Result{ @@ -42,7 +43,7 @@ func ACLNoPermissions(t testutil.TestingTB) resolver.Result { } } -func ACLServiceWriteAny(t testutil.TestingTB) resolver.Result { +func ACLServiceWriteAny(t *testing.T) resolver.Result { t.Helper() policy, err := acl.NewPolicyFromSource(` @@ -61,7 +62,7 @@ func ACLServiceWriteAny(t testutil.TestingTB) resolver.Result { } } -func ACLServiceRead(t testutil.TestingTB, serviceName string) resolver.Result { +func ACLServiceRead(t *testing.T, serviceName string) resolver.Result { t.Helper() aclRule := &acl.Policy{ @@ -83,19 +84,7 @@ func ACLServiceRead(t testutil.TestingTB, serviceName string) resolver.Result { } } -func ACLUseProvidedPolicy(t testutil.TestingTB, aclPolicy *acl.Policy) resolver.Result { - t.Helper() - - authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{aclPolicy}, nil) - require.NoError(t, err) - - return resolver.Result{ - Authorizer: authz, - ACLIdentity: randomACLIdentity(t), - } -} - -func ACLOperatorRead(t testutil.TestingTB) resolver.Result { +func ACLOperatorRead(t *testing.T) resolver.Result { t.Helper() aclRule := &acl.Policy{ @@ -112,7 +101,7 @@ func ACLOperatorRead(t testutil.TestingTB) resolver.Result { } } -func ACLOperatorWrite(t testutil.TestingTB) resolver.Result { +func ACLOperatorWrite(t *testing.T) resolver.Result { t.Helper() aclRule := &acl.Policy{ @@ -129,7 +118,7 @@ func ACLOperatorWrite(t testutil.TestingTB) resolver.Result { } } -func randomACLIdentity(t testutil.TestingTB) structs.ACLIdentity { +func randomACLIdentity(t *testing.T) structs.ACLIdentity { id, err := uuid.GenerateUUID() require.NoError(t, err) diff --git a/agent/grpc-external/testutils/fsm.go b/agent/grpc-external/testutils/fsm.go index fb243ac365207..0e7b645a65ec5 100644 --- a/agent/grpc-external/testutils/fsm.go +++ 
b/agent/grpc-external/testutils/fsm.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testutils @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/blockingquery" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/stretchr/testify/require" @@ -71,47 +70,6 @@ func (f *FakeFSM) ReplaceStore(store *state.Store) { } } -type FakeBlockingFSM struct { - store *state.Store -} - -func NewFakeBlockingFSM(t *testing.T) *FakeBlockingFSM { - t.Helper() - - store := TestStateStore(t, nil) - - fsm := &FakeBlockingFSM{store: store} - - return fsm -} - -func (f *FakeBlockingFSM) GetState() *state.Store { - return f.store -} - -func (f *FakeBlockingFSM) ConsistentRead() error { - return nil -} - -func (f *FakeBlockingFSM) DecrementBlockingQueries() uint64 { - return 0 -} - -func (f *FakeBlockingFSM) IncrementBlockingQueries() uint64 { - return 0 -} - -func (f *FakeBlockingFSM) GetShutdownChannel() chan struct{} { - return nil -} - -func (f *FakeBlockingFSM) RPCQueryTimeout(queryTimeout time.Duration) time.Duration { - return queryTimeout -} - -func (f *FakeBlockingFSM) SetQueryMeta(blockingquery.ResponseMeta, string) { -} - func SetupFSMAndPublisher(t *testing.T, config FakeFSMConfig) (*FakeFSM, state.EventPublisher) { t.Helper() config.publisher = stream.NewEventPublisher(10 * time.Second) diff --git a/agent/grpc-external/testutils/mock_server_transport_stream.go b/agent/grpc-external/testutils/mock_server_transport_stream.go deleted file mode 100644 index b57bc737d3266..0000000000000 --- a/agent/grpc-external/testutils/mock_server_transport_stream.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package testutils - -import "google.golang.org/grpc/metadata" - -type MockServerTransportStream struct { - MD metadata.MD -} - -func (m *MockServerTransportStream) Method() string { - return "" -} - -func (m *MockServerTransportStream) SetHeader(md metadata.MD) error { - return nil -} - -func (m *MockServerTransportStream) SendHeader(md metadata.MD) error { - m.MD = metadata.Join(m.MD, md) - return nil -} - -func (m *MockServerTransportStream) SetTrailer(md metadata.MD) error { - return nil -} diff --git a/agent/grpc-external/testutils/server.go b/agent/grpc-external/testutils/server.go index 8613bec65c276..eecb10bb954f3 100644 --- a/agent/grpc-external/testutils/server.go +++ b/agent/grpc-external/testutils/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testutils @@ -12,7 +12,7 @@ import ( ) type GRPCService interface { - Register(grpc.ServiceRegistrar) + Register(*grpc.Server) } func RunTestServer(t *testing.T, services ...GRPCService) net.Addr { diff --git a/agent/grpc-external/utils.go b/agent/grpc-external/utils.go index 8d915943da671..b3a3d3d20264a 100644 --- a/agent/grpc-external/utils.go +++ b/agent/grpc-external/utils.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package external @@ -44,9 +44,3 @@ func RequireAnyValidACLToken(resolver ACLResolver, token string) error { return nil } - -func RequireNotNil(v interface{}, name string) { - if v == nil { - panic(name + " is required") - } -} diff --git a/agent/grpc-internal/balancer/balancer.go b/agent/grpc-internal/balancer/balancer.go index 884c2a1dec3dc..4941a80873188 100644 --- a/agent/grpc-internal/balancer/balancer.go +++ b/agent/grpc-internal/balancer/balancer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // package balancer implements a custom gRPC load balancer. // diff --git a/agent/grpc-internal/balancer/balancer_test.go b/agent/grpc-internal/balancer/balancer_test.go index f0c6db9f53296..35912aab269a1 100644 --- a/agent/grpc-internal/balancer/balancer_test.go +++ b/agent/grpc-internal/balancer/balancer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package balancer diff --git a/agent/grpc-internal/balancer/registry.go b/agent/grpc-internal/balancer/registry.go index 53b2e6555ac30..f11ea6c8cebeb 100644 --- a/agent/grpc-internal/balancer/registry.go +++ b/agent/grpc-internal/balancer/registry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package balancer diff --git a/agent/grpc-internal/client.go b/agent/grpc-internal/client.go index 1d49bc23cdd31..98a6f1fd81c5d 100644 --- a/agent/grpc-internal/client.go +++ b/agent/grpc-internal/client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/client_test.go b/agent/grpc-internal/client_test.go index 134a62aa4aae3..a3b99e78ad1b7 100644 --- a/agent/grpc-internal/client_test.go +++ b/agent/grpc-internal/client_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/handler.go b/agent/grpc-internal/handler.go index 3cda5087be477..3278d744436f9 100644 --- a/agent/grpc-internal/handler.go +++ b/agent/grpc-internal/handler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal @@ -28,7 +28,9 @@ var ( ) // NewHandler returns a gRPC server that accepts connections from Handle(conn). -func NewHandler(logger Logger, addr net.Addr, metricsObj *metrics.Metrics, rateLimiter rate.RequestLimitsHandler) *Handler { +// The register function will be called with the grpc.Server to register +// gRPC services with the server. +func NewHandler(logger Logger, addr net.Addr, register func(server *grpc.Server), metricsObj *metrics.Metrics, rateLimiter rate.RequestLimitsHandler) *Handler { if metricsObj == nil { metricsObj = metrics.Default() } @@ -57,6 +59,7 @@ func NewHandler(logger Logger, addr net.Addr, metricsObj *metrics.Metrics, rateL // We don't need to pass tls.Config to the server since it's multiplexed // behind the RPC listener, which already has TLS configured. srv := grpc.NewServer(opts...) 
+ register(srv) return &Handler{srv: srv, listener: NewListener(addr)} } @@ -77,12 +80,6 @@ func (h *Handler) Run() error { return h.srv.Serve(h.listener) } -// Implements the grpc.ServiceRegistrar interface to allow registering services -// with the Handler. -func (h *Handler) RegisterService(svc *grpc.ServiceDesc, impl any) { - h.srv.RegisterService(svc, impl) -} - func (h *Handler) Shutdown() error { h.srv.Stop() return nil diff --git a/agent/grpc-internal/handler_test.go b/agent/grpc-internal/handler_test.go index 2027c055866de..80c026113d1f5 100644 --- a/agent/grpc-internal/handler_test.go +++ b/agent/grpc-internal/handler_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/listener.go b/agent/grpc-internal/listener.go index a1c226613778c..bcbf121c733e8 100644 --- a/agent/grpc-internal/listener.go +++ b/agent/grpc-internal/listener.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/pipe.go b/agent/grpc-internal/pipe.go index 555f6d2162aa3..188defd085ed9 100644 --- a/agent/grpc-internal/pipe.go +++ b/agent/grpc-internal/pipe.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/pipe_test.go b/agent/grpc-internal/pipe_test.go index f51d1581292b2..e6ce286d1f867 100644 --- a/agent/grpc-internal/pipe_test.go +++ b/agent/grpc-internal/pipe_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-internal/resolver/registry.go b/agent/grpc-internal/resolver/registry.go index aab369c501314..5151cfd46ce0f 100644 --- a/agent/grpc-internal/resolver/registry.go +++ b/agent/grpc-internal/resolver/registry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resolver diff --git a/agent/grpc-internal/resolver/resolver.go b/agent/grpc-internal/resolver/resolver.go index 8d1436bf7aff2..d04f1e657e61f 100644 --- a/agent/grpc-internal/resolver/resolver.go +++ b/agent/grpc-internal/resolver/resolver.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package resolver diff --git a/agent/grpc-internal/server_test.go b/agent/grpc-internal/server_test.go index 65b43580a17b1..83774c712fc6a 100644 --- a/agent/grpc-internal/server_test.go +++ b/agent/grpc-internal/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal @@ -44,22 +44,21 @@ func (s testServer) Metadata() *metadata.Server { } func newSimpleTestServer(t *testing.T, name, dc string, tlsConf *tlsutil.Configurator) testServer { - return newTestServer(t, hclog.Default(), name, dc, tlsConf, func(server grpc.ServiceRegistrar) { + return newTestServer(t, hclog.Default(), name, dc, tlsConf, func(server *grpc.Server) { testservice.RegisterSimpleServer(server, &testservice.Simple{Name: name, DC: dc}) }) } // newPanicTestServer sets up a simple server with handlers that panic. 
func newPanicTestServer(t *testing.T, logger hclog.Logger, name, dc string, tlsConf *tlsutil.Configurator) testServer { - return newTestServer(t, logger, name, dc, tlsConf, func(server grpc.ServiceRegistrar) { + return newTestServer(t, logger, name, dc, tlsConf, func(server *grpc.Server) { testservice.RegisterSimpleServer(server, &testservice.SimplePanic{Name: name, DC: dc}) }) } -func newTestServer(t *testing.T, logger hclog.Logger, name, dc string, tlsConf *tlsutil.Configurator, register func(server grpc.ServiceRegistrar)) testServer { +func newTestServer(t *testing.T, logger hclog.Logger, name, dc string, tlsConf *tlsutil.Configurator, register func(server *grpc.Server)) testServer { addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} - handler := NewHandler(logger, addr, nil, rate.NullRequestLimitsHandler()) - register(handler) + handler := NewHandler(logger, addr, register, nil, rate.NullRequestLimitsHandler()) lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) diff --git a/agent/grpc-internal/services/subscribe/logger.go b/agent/grpc-internal/services/subscribe/logger.go index 11c18f6adfa48..faaa63ff84270 100644 --- a/agent/grpc-internal/services/subscribe/logger.go +++ b/agent/grpc-internal/services/subscribe/logger.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package subscribe diff --git a/agent/grpc-internal/services/subscribe/subscribe.go b/agent/grpc-internal/services/subscribe/subscribe.go index a728b0164c977..08c501b6dd032 100644 --- a/agent/grpc-internal/services/subscribe/subscribe.go +++ b/agent/grpc-internal/services/subscribe/subscribe.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package subscribe diff --git a/agent/grpc-internal/services/subscribe/subscribe_test.go b/agent/grpc-internal/services/subscribe/subscribe_test.go index 9f6b550cc9a71..54a267f7c40d1 100644 --- a/agent/grpc-internal/services/subscribe/subscribe_test.go +++ b/agent/grpc-internal/services/subscribe/subscribe_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package subscribe @@ -377,62 +377,36 @@ func newTestBackend(t *testing.T) *testBackend { var _ Backend = (*testBackend)(nil) func runTestServer(t *testing.T, server *Server) net.Addr { - // create the errgroup and register its cleanup. Its cleanup needs to occur - // after all others and that is why this is being done so early on in this function - // as cleanup routines are processed in reverse order of them being added. - g := new(errgroup.Group) - // this cleanup needs to happen after others defined in this func so we do it early - // on up here. 
- t.Cleanup(func() { - if err := g.Wait(); err != nil { - t.Logf("grpc server error: %v", err) - } - }) - - // start the handler addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} + var grpcServer *gogrpc.Server handler := grpc.NewHandler( hclog.New(nil), addr, + func(srv *gogrpc.Server) { + grpcServer = srv + pbsubscribe.RegisterStateChangeSubscriptionServer(srv, server) + }, nil, rate.NullRequestLimitsHandler(), ) - pbsubscribe.RegisterStateChangeSubscriptionServer(handler, server) - g.Go(handler.Run) - t.Cleanup(func() { - if err := handler.Shutdown(); err != nil { - t.Logf("grpc server shutdown: %v", err) - } - }) - // create the routing to forward network conns to the gRPC handler lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - g.Go(func() error { - for { - // select { - // case <-ctx.Done(): - // return ctx.Err() - // default: - // } - - conn, err := lis.Accept() - if err != nil { - return err - } - - // select { - // case <-ctx.Done(): - // return ctx.Err() - // default: - // } + t.Cleanup(logError(t, lis.Close)) - handler.Handle(conn) + go grpcServer.Serve(lis) + g := new(errgroup.Group) + g.Go(func() error { + return grpcServer.Serve(lis) + }) + t.Cleanup(func() { + if err := handler.Shutdown(); err != nil { + t.Logf("grpc server shutdown: %v", err) + } + if err := g.Wait(); err != nil { + t.Logf("grpc server error: %v", err) } }) - // closing the listener should cause the Accept to unblock and error out - t.Cleanup(logError(t, lis.Close)) - return lis.Addr() } diff --git a/agent/grpc-internal/stats_test.go b/agent/grpc-internal/stats_test.go index e08e869dea9b5..5da26f512ccbe 100644 --- a/agent/grpc-internal/stats_test.go +++ b/agent/grpc-internal/stats_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal @@ -23,13 +23,15 @@ import ( "github.com/hashicorp/consul/proto/private/prototest" ) +func noopRegister(*grpc.Server) {} + func TestHandler_EmitsStats(t *testing.T) { sink, metricsObj := testutil.NewFakeSink(t) addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} - handler := NewHandler(hclog.Default(), addr, metricsObj, rate.NullRequestLimitsHandler()) + handler := NewHandler(hclog.Default(), addr, noopRegister, metricsObj, rate.NullRequestLimitsHandler()) - testservice.RegisterSimpleServer(handler, &testservice.Simple{}) + testservice.RegisterSimpleServer(handler.srv, &testservice.Simple{}) lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) diff --git a/agent/grpc-internal/tracker.go b/agent/grpc-internal/tracker.go index 251fe48f95399..a313f88e53501 100644 --- a/agent/grpc-internal/tracker.go +++ b/agent/grpc-internal/tracker.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package internal diff --git a/agent/grpc-middleware/auth_interceptor.go b/agent/grpc-middleware/auth_interceptor.go index af85e5c6f94c2..0472b71f00ece 100644 --- a/agent/grpc-middleware/auth_interceptor.go +++ b/agent/grpc-middleware/auth_interceptor.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/auth_interceptor_test.go b/agent/grpc-middleware/auth_interceptor_test.go index 0c447499bcb2f..18f9334cc9b80 100644 --- a/agent/grpc-middleware/auth_interceptor_test.go +++ b/agent/grpc-middleware/auth_interceptor_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/handshake.go b/agent/grpc-middleware/handshake.go index 893421e0e7f1b..82b352bb5ac30 100644 --- a/agent/grpc-middleware/handshake.go +++ b/agent/grpc-middleware/handshake.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/handshake_test.go b/agent/grpc-middleware/handshake_test.go index 178451a31464c..f987a6689af22 100644 --- a/agent/grpc-middleware/handshake_test.go +++ b/agent/grpc-middleware/handshake_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/rate.go b/agent/grpc-middleware/rate.go index bdb63cd244a97..6f84fd36c16e6 100644 --- a/agent/grpc-middleware/rate.go +++ b/agent/grpc-middleware/rate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/rate_limit_mappings.gen.go b/agent/grpc-middleware/rate_limit_mappings.gen.go index 2f6364b1b29af..fc73aa9d9cf29 100644 --- a/agent/grpc-middleware/rate_limit_mappings.gen.go +++ b/agent/grpc-middleware/rate_limit_mappings.gen.go @@ -4,37 +4,35 @@ package middleware import "github.com/hashicorp/consul/agent/consul/rate" var rpcRateLimitSpecs = map[string]rate.OperationSpec{ - "/hashicorp.consul.acl.ACLService/Login": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryACL}, - "/hashicorp.consul.acl.ACLService/Logout": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryACL}, - "/hashicorp.consul.connectca.ConnectCAService/Sign": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryConnectCA}, - "/hashicorp.consul.connectca.ConnectCAService/WatchRoots": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryConnectCA}, - "/hashicorp.consul.dataplane.DataplaneService/GetEnvoyBootstrapParams": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryDataPlane}, - "/hashicorp.consul.dataplane.DataplaneService/GetSupportedDataplaneFeatures": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryDataPlane}, - "/hashicorp.consul.dns.DNSService/Query": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryDNS}, - "/hashicorp.consul.internal.configentry.ConfigEntryService/GetResolvedExportedServices": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryConfigEntry}, - "/hashicorp.consul.internal.operator.OperatorService/TransferLeader": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryOperator}, - "/hashicorp.consul.internal.peering.PeeringService/Establish": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/GenerateToken": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/PeeringDelete": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/PeeringList": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/PeeringRead": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, - 
"/hashicorp.consul.internal.peering.PeeringService/PeeringWrite": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/TrustBundleListByService": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peering.PeeringService/TrustBundleRead": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, - "/hashicorp.consul.internal.peerstream.PeerStreamService/ExchangeSecret": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeerStream}, - "/hashicorp.consul.internal.peerstream.PeerStreamService/StreamResources": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeerStream}, - "/hashicorp.consul.internal.storage.raft.ForwardingService/Delete": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.internal.storage.raft.ForwardingService/List": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.internal.storage.raft.ForwardingService/Read": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.internal.storage.raft.ForwardingService/Write": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/Delete": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/List": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/ListByOwner": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/MutateAndValidate": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/Read": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/WatchList": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/Write": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.resource.ResourceService/WriteStatus": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, - "/hashicorp.consul.serverdiscovery.ServerDiscoveryService/WatchServers": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryServerDiscovery}, - "/subscribe.StateChangeSubscription/Subscribe": {Type: rate.OperationTypeRead, Category: rate.OperationCategorySubscribe}, + "/hashicorp.consul.acl.ACLService/Login": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryACL}, + "/hashicorp.consul.acl.ACLService/Logout": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryACL}, + "/hashicorp.consul.connectca.ConnectCAService/Sign": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryConnectCA}, + "/hashicorp.consul.connectca.ConnectCAService/WatchRoots": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryConnectCA}, + "/hashicorp.consul.dataplane.DataplaneService/GetEnvoyBootstrapParams": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryDataPlane}, + "/hashicorp.consul.dataplane.DataplaneService/GetSupportedDataplaneFeatures": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryDataPlane}, + "/hashicorp.consul.dns.DNSService/Query": {Type: rate.OperationTypeRead, Category: 
rate.OperationCategoryDNS}, + "/hashicorp.consul.internal.operator.OperatorService/TransferLeader": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryOperator}, + "/hashicorp.consul.internal.peering.PeeringService/Establish": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/GenerateToken": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/PeeringDelete": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/PeeringList": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/PeeringRead": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/PeeringWrite": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/TrustBundleListByService": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peering.PeeringService/TrustBundleRead": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeering}, + "/hashicorp.consul.internal.peerstream.PeerStreamService/ExchangeSecret": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryPeerStream}, + "/hashicorp.consul.internal.peerstream.PeerStreamService/StreamResources": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryPeerStream}, + "/hashicorp.consul.internal.storage.raft.ForwardingService/Delete": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.internal.storage.raft.ForwardingService/List": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.internal.storage.raft.ForwardingService/Read": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.internal.storage.raft.ForwardingService/Write": {Type: rate.OperationTypeExempt, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/Delete": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/List": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/ListByOwner": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/Read": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/WatchList": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/Write": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.resource.ResourceService/WriteStatus": {Type: rate.OperationTypeWrite, Category: rate.OperationCategoryResource}, + "/hashicorp.consul.serverdiscovery.ServerDiscoveryService/WatchServers": {Type: rate.OperationTypeRead, Category: rate.OperationCategoryServerDiscovery}, + "/subscribe.StateChangeSubscription/Subscribe": {Type: rate.OperationTypeRead, Category: rate.OperationCategorySubscribe}, } diff --git a/agent/grpc-middleware/rate_test.go b/agent/grpc-middleware/rate_test.go index 16c84734a9edd..0a71d232465cf 100644 --- 
a/agent/grpc-middleware/rate_test.go +++ b/agent/grpc-middleware/rate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/recovery.go b/agent/grpc-middleware/recovery.go index 04b918ee9b3a2..cf1cbabe4e085 100644 --- a/agent/grpc-middleware/recovery.go +++ b/agent/grpc-middleware/recovery.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/grpc-middleware/stats.go b/agent/grpc-middleware/stats.go index b17fe7138588d..564d14a844b95 100644 --- a/agent/grpc-middleware/stats.go +++ b/agent/grpc-middleware/stats.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware @@ -107,13 +107,6 @@ func (c *statsHandler) HandleConn(_ context.Context, s stats.ConnStats) { c.metrics.SetGaugeWithLabels([]string{"grpc", label, "connections"}, float32(count), c.labels) } -// Intercept matches the Unary interceptor function signature. This unary interceptor will count RPC requests -// but does not handle any connection processing or perform RPC "tagging" -func (c *statsHandler) Intercept(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - c.metrics.IncrCounterWithLabels([]string{"grpc", "server", "request", "count"}, 1, c.labels) - return handler(ctx, req) -} - type activeStreamCounter struct { // count is used with sync/atomic and MUST be 64-bit aligned. To ensure // alignment on 32-bit platforms this field must remain the first field in diff --git a/agent/grpc-middleware/testutil/fake_sink.go b/agent/grpc-middleware/testutil/fake_sink.go index be7623c774a2c..c121481ee24fb 100644 --- a/agent/grpc-middleware/testutil/fake_sink.go +++ b/agent/grpc-middleware/testutil/fake_sink.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testutil diff --git a/agent/grpc-middleware/testutil/testservice/buf.gen.yaml b/agent/grpc-middleware/testutil/testservice/buf.gen.yaml index 8d8a6c7dbfc09..b8ba317a333a9 100644 --- a/agent/grpc-middleware/testutil/testservice/buf.gen.yaml +++ b/agent/grpc-middleware/testutil/testservice/buf.gen.yaml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 version: v1 managed: diff --git a/agent/grpc-middleware/testutil/testservice/fake_service.go b/agent/grpc-middleware/testutil/testservice/fake_service.go index ca21d286f0b35..4428e173740bb 100644 --- a/agent/grpc-middleware/testutil/testservice/fake_service.go +++ b/agent/grpc-middleware/testutil/testservice/fake_service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package testservice diff --git a/agent/grpc-middleware/testutil/testservice/simple.pb.go b/agent/grpc-middleware/testutil/testservice/simple.pb.go index fcd9fb2fe4a41..ce05a07cdf0d0 100644 --- a/agent/grpc-middleware/testutil/testservice/simple.pb.go +++ b/agent/grpc-middleware/testutil/testservice/simple.pb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: diff --git a/agent/grpc-middleware/testutil/testservice/simple.proto b/agent/grpc-middleware/testutil/testservice/simple.proto index d005a45aa1138..c8ce3d58118d3 100644 --- a/agent/grpc-middleware/testutil/testservice/simple.proto +++ b/agent/grpc-middleware/testutil/testservice/simple.proto @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 syntax = "proto3"; diff --git a/agent/hcp/bootstrap/bootstrap.go b/agent/hcp/bootstrap/bootstrap.go index 914a1890f3eba..191859ea002b4 100644 --- a/agent/hcp/bootstrap/bootstrap.go +++ b/agent/hcp/bootstrap/bootstrap.go @@ -1,7 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -// Package bootstrap handles bootstrapping an agent's config from HCP. +// Package bootstrap handles bootstrapping an agent's config from HCP. It must be a +// separate package from other HCP components because it has a dependency on +// agent/config while other components need to be imported and run within the +// server process in agent/consul and that would create a dependency cycle. package bootstrap import ( @@ -18,25 +21,27 @@ import ( "strings" "time" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - + "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" hcpclient "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/retry" + "github.com/hashicorp/go-uuid" ) const ( - CAFileName = "server-tls-cas.pem" - CertFileName = "server-tls-cert.pem" - ConfigFileName = "server-config.json" - KeyFileName = "server-tls-key.pem" - TokenFileName = "hcp-management-token" - SuccessFileName = "successful-bootstrap" + subDir = "hcp-config" + + caFileName = "server-tls-cas.pem" + certFileName = "server-tls-cert.pem" + configFileName = "server-config.json" + keyFileName = "server-tls-key.pem" + tokenFileName = "hcp-management-token" + successFileName = "successful-bootstrap" ) +type ConfigLoader func(source config.Source) (config.LoadResult, error) + // UI is a shim to allow the agent command to pass in it's mitchelh/cli.UI so we // can output useful messages to the user during bootstrapping. For example if // we have to retry several times to bootstrap we don't want the agent to just @@ -56,27 +61,142 @@ type RawBootstrapConfig struct { ManagementToken string } -// FetchBootstrapConfig will fetch bootstrap configuration from remote servers and persist it to disk. +// LoadConfig will attempt to load previously-fetched config from disk and fall back to +// fetch from HCP servers if the local data is incomplete. +// It must be passed a (CLI) UI implementation so it can deliver progress +// updates to the user, for example if it is waiting to retry for a long period. +func LoadConfig(ctx context.Context, client hcpclient.Client, dataDir string, loader ConfigLoader, ui UI) (ConfigLoader, error) { + ui.Output("Loading configuration from HCP") + + // See if we have existing config on disk + // + // OPTIMIZE: We could probably be more intelligent about config loading. + // The currently implemented approach is: + // 1. Attempt to load data from disk + // 2. If that fails or the data is incomplete, block indefinitely fetching remote config. + // + // What if instead we had the following flow: + // 1. Attempt to fetch config from HCP. + // 2. 
If that fails, fall back to data on disk from last fetch. + // 3. If that fails, go into blocking loop to fetch remote config. + // + // This should allow us to more gracefully transition cases like when + // an existing cluster is linked, but then wants to receive TLS materials + // at a later time. Currently, if we observe the existing-cluster marker we + // don't attempt to fetch any additional configuration from HCP. + + cfg, ok := loadPersistedBootstrapConfig(dataDir, ui) + if !ok { + ui.Info("Fetching configuration from HCP servers") + + var err error + cfg, err = fetchBootstrapConfig(ctx, client, dataDir, ui) + if err != nil { + return nil, fmt.Errorf("failed to bootstrap from HCP: %w", err) + } + ui.Info("Configuration fetched from HCP and saved on local disk") + + } else { + ui.Info("Loaded HCP configuration from local disk") + + } + + // Create a new loader func to return + newLoader := bootstrapConfigLoader(loader, cfg) + return newLoader, nil +} + +// bootstrapConfigLoader is a ConfigLoader for passing bootstrap JSON config received from HCP +// to the config.builder. ConfigLoaders are functions used to build an agent's RuntimeConfig +// from various sources like files and flags. This config is contained in the config.LoadResult. +// +// The flow to include bootstrap config from HCP as a loader's data source is as follows: +// +// 1. A base ConfigLoader function (baseLoader) is created on agent start, and it sets the input +// source argument as the DefaultConfig. +// +// 2. When a server agent can be configured by HCP that baseLoader is wrapped in this bootstrapConfigLoader. +// +// 3. The bootstrapConfigLoader calls that base loader with the bootstrap JSON config as the +// default source. This data will be merged with other valid sources in the config.builder. +// +// 4. The result of the call to baseLoader() below contains the resulting RuntimeConfig, and we do some +// additional modifications to attach data that doesn't get populated during the build in the config pkg. +// +// Note that since the ConfigJSON is stored as the baseLoader's DefaultConfig, its data is the first +// to be merged by the config.builder and could be overwritten by user-provided values in config files or +// CLI flags. However, values set to RuntimeConfig after the baseLoader call are final. +func bootstrapConfigLoader(baseLoader ConfigLoader, cfg *RawBootstrapConfig) ConfigLoader { + return func(source config.Source) (config.LoadResult, error) { + // Don't allow any further attempts to provide a DefaultSource. This should + // only ever be needed later in client agent AutoConfig code but that should + // be mutually exclusive from this bootstrapping mechanism since this is + // only for servers. If we ever try to change that, this clear failure + // should alert future developers that the assumptions are changing rather + // than quietly not applying the config they expect! + if source != nil { + return config.LoadResult{}, + fmt.Errorf("non-nil config source provided to a loader after HCP bootstrap already provided a DefaultSource") + } + + // Otherwise, just call to the loader we were passed with our own additional + // JSON as the source. + // + // OPTIMIZE: We could check/log whether any fields set by the remote config were overwritten by a user-provided flag. 
+ res, err := baseLoader(config.FileSource{ + Name: "HCP Bootstrap", + Format: "json", + Data: cfg.ConfigJSON, + }) + if err != nil { + return res, fmt.Errorf("failed to load HCP Bootstrap config: %w", err) + } + + finalizeRuntimeConfig(res.RuntimeConfig, cfg) + return res, nil + } +} + +const ( + accessControlHeaderName = "Access-Control-Expose-Headers" + accessControlHeaderValue = "x-consul-default-acl-policy" +) + +// finalizeRuntimeConfig will set additional HCP-specific values that are not +// handled by the config.builder. +func finalizeRuntimeConfig(rc *config.RuntimeConfig, cfg *RawBootstrapConfig) { + rc.Cloud.ManagementToken = cfg.ManagementToken + + // HTTP response headers are modified for the HCP UI to work. + if rc.HTTPResponseHeaders == nil { + rc.HTTPResponseHeaders = make(map[string]string) + } + prevValue, ok := rc.HTTPResponseHeaders[accessControlHeaderName] + if !ok { + rc.HTTPResponseHeaders[accessControlHeaderName] = accessControlHeaderValue + } else { + rc.HTTPResponseHeaders[accessControlHeaderName] = prevValue + "," + accessControlHeaderValue + } +} + +// fetchBootstrapConfig will fetch bootstrap configuration from remote servers and persist it to disk. // It will retry until successful or a terminal error condition is found (e.g. permission denied). -func FetchBootstrapConfig(ctx context.Context, client hcpclient.Client, dataDir string, ui UI) (*RawBootstrapConfig, error) { +func fetchBootstrapConfig(ctx context.Context, client hcpclient.Client, dataDir string, ui UI) (*RawBootstrapConfig, error) { w := retry.Waiter{ MinWait: 1 * time.Second, MaxWait: 5 * time.Minute, Jitter: retry.NewJitter(50), } + var bsCfg *hcpclient.BootstrapConfig for { // Note we don't want to shadow `ctx` here since we need that for the Wait // below. reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - cfg, err := fetchBootstrapConfig(reqCtx, client, dataDir) + resp, err := client.FetchBootstrap(reqCtx) if err != nil { - if errors.Is(err, hcpclient.ErrUnauthorized) || errors.Is(err, hcpclient.ErrForbidden) { - // Don't retry on terminal errors - return nil, err - } ui.Error(fmt.Sprintf("Error: failed to fetch bootstrap config from HCP, will retry in %s: %s", w.NextWait().Round(time.Second), err)) if err := w.Wait(ctx); err != nil { @@ -85,22 +205,12 @@ func FetchBootstrapConfig(ctx context.Context, client hcpclient.Client, dataDir // Finished waiting, restart loop continue } - return cfg, nil - } -} - -// fetchBootstrapConfig will fetch the bootstrap configuration from remote servers and persist it to disk. -func fetchBootstrapConfig(ctx context.Context, client hcpclient.Client, dataDir string) (*RawBootstrapConfig, error) { - reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - resp, err := client.FetchBootstrap(reqCtx) - if err != nil { - return nil, fmt.Errorf("failed to fetch bootstrap config from HCP: %w", err) + bsCfg = resp + break } - bsCfg := resp devMode := dataDir == "" + cfgJSON, err := persistAndProcessConfig(dataDir, devMode, bsCfg) if err != nil { return nil, fmt.Errorf("failed to persist config for existing cluster: %w", err) @@ -128,7 +238,7 @@ func persistAndProcessConfig(dataDir string, devMode bool, bsCfg *hcpclient.Boot } // Create subdir if it's not already there.
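As a quick, hedged illustration of the finalizeRuntimeConfig behavior added above (written as if it lived in the same bootstrap package; it mirrors what Test_finalizeRuntimeConfig asserts later in this patch):

```go
// Illustrative sketch, in-package, of the header merge performed by
// finalizeRuntimeConfig above.
package bootstrap

import "github.com/hashicorp/consul/agent/config"

func exampleHeaderMerge() string {
	rc := &config.RuntimeConfig{
		HTTPResponseHeaders: map[string]string{
			// Operator-provided value that must be preserved.
			accessControlHeaderName: "Content-Encoding",
		},
	}
	finalizeRuntimeConfig(rc, &RawBootstrapConfig{ManagementToken: "test-token"})

	// The HCP value is appended rather than overwriting the existing header,
	// and the management token is copied onto rc.Cloud.ManagementToken.
	return rc.HTTPResponseHeaders[accessControlHeaderName] // "Content-Encoding,x-consul-default-acl-policy"
}
```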
- dir := filepath.Join(dataDir, constants.SubDir) + dir := filepath.Join(dataDir, subDir) if err := lib.EnsurePath(dir, true); err != nil { return "", fmt.Errorf("failed to ensure directory %q: %w", dir, err) } @@ -158,7 +268,7 @@ func persistAndProcessConfig(dataDir string, devMode bool, bsCfg *hcpclient.Boot var cfgJSON string if bsCfg.TLSCert != "" { - if err := ValidateTLSCerts(bsCfg.TLSCert, bsCfg.TLSCertKey, bsCfg.TLSCAs); err != nil { + if err := validateTLSCerts(bsCfg.TLSCert, bsCfg.TLSCertKey, bsCfg.TLSCAs); err != nil { return "", fmt.Errorf("invalid certificates: %w", err) } @@ -169,9 +279,9 @@ func persistAndProcessConfig(dataDir string, devMode bool, bsCfg *hcpclient.Boot } // Store paths to the persisted TLS cert files. - cfg["ca_file"] = filepath.Join(dir, CAFileName) - cfg["cert_file"] = filepath.Join(dir, CertFileName) - cfg["key_file"] = filepath.Join(dir, KeyFileName) + cfg["ca_file"] = filepath.Join(dir, caFileName) + cfg["cert_file"] = filepath.Join(dir, certFileName) + cfg["key_file"] = filepath.Join(dir, keyFileName) // Convert the bootstrap config map back into a string cfgJSONBytes, err := json.Marshal(cfg) @@ -207,7 +317,7 @@ func persistAndProcessConfig(dataDir string, devMode bool, bsCfg *hcpclient.Boot } func persistSuccessMarker(dir string) error { - name := filepath.Join(dir, SuccessFileName) + name := filepath.Join(dir, successFileName) return os.WriteFile(name, []byte(""), 0600) } @@ -220,7 +330,7 @@ func persistTLSCerts(dir string, serverCert, serverKey string, caCerts []string) // Write out CA cert(s). We write them all to one file because Go's x509 // machinery will read as many certs as it finds from each PEM file provided // and add them separaetly to the CertPool for validation - f, err := os.OpenFile(filepath.Join(dir, CAFileName), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + f, err := os.OpenFile(filepath.Join(dir, caFileName), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return err } @@ -235,11 +345,11 @@ func persistTLSCerts(dir string, serverCert, serverKey string, caCerts []string) return err } - if err := os.WriteFile(filepath.Join(dir, CertFileName), []byte(serverCert), 0600); err != nil { + if err := os.WriteFile(filepath.Join(dir, certFileName), []byte(serverCert), 0600); err != nil { return err } - if err := os.WriteFile(filepath.Join(dir, KeyFileName), []byte(serverKey), 0600); err != nil { + if err := os.WriteFile(filepath.Join(dir, keyFileName), []byte(serverKey), 0600); err != nil { return err } @@ -256,26 +366,26 @@ func validateManagementToken(token string) error { } func persistManagementToken(dir, token string) error { - name := filepath.Join(dir, TokenFileName) + name := filepath.Join(dir, tokenFileName) return os.WriteFile(name, []byte(token), 0600) } func persistBootstrapConfig(dir, cfgJSON string) error { // Persist the important bits we got from bootstrapping. The TLS certs are // already persisted, just need to persist the config we are going to add. - name := filepath.Join(dir, ConfigFileName) + name := filepath.Join(dir, configFileName) return os.WriteFile(name, []byte(cfgJSON), 0600) } -func LoadPersistedBootstrapConfig(dataDir string, ui UI) (*RawBootstrapConfig, bool) { +func loadPersistedBootstrapConfig(dataDir string, ui UI) (*RawBootstrapConfig, bool) { if dataDir == "" { // There's no files to load when in dev mode. 
return nil, false } - dir := filepath.Join(dataDir, constants.SubDir) + dir := filepath.Join(dataDir, subDir) - _, err := os.Stat(filepath.Join(dir, SuccessFileName)) + _, err := os.Stat(filepath.Join(dir, successFileName)) if os.IsNotExist(err) { // Haven't bootstrapped from HCP. return nil, false @@ -309,7 +419,7 @@ func LoadPersistedBootstrapConfig(dataDir string, ui UI) (*RawBootstrapConfig, b } func loadBootstrapConfigJSON(dataDir string) (string, error) { - filename := filepath.Join(dataDir, constants.SubDir, ConfigFileName) + filename := filepath.Join(dataDir, subDir, configFileName) _, err := os.Stat(filename) if os.IsNotExist(err) { @@ -319,6 +429,21 @@ func loadBootstrapConfigJSON(dataDir string) (string, error) { return "", fmt.Errorf("failed to check for bootstrap config: %w", err) } + // Attempt to load persisted config to check for errors and basic validity. + // Errors here will raise issues like referencing unsupported config fields. + _, err = config.Load(config.LoadOpts{ + ConfigFiles: []string{filename}, + HCL: []string{ + "server = true", + `bind_addr = "127.0.0.1"`, + fmt.Sprintf("data_dir = %q", dataDir), + }, + ConfigFormat: "json", + }) + if err != nil { + return "", fmt.Errorf("failed to parse local bootstrap config: %w", err) + } + jsonBs, err := os.ReadFile(filename) if err != nil { return "", fmt.Errorf(fmt.Sprintf("failed to read local bootstrap config file: %s", err)) @@ -327,7 +452,7 @@ func loadBootstrapConfigJSON(dataDir string) (string, error) { } func loadManagementToken(dir string) (string, error) { - name := filepath.Join(dir, TokenFileName) + name := filepath.Join(dir, tokenFileName) bytes, err := os.ReadFile(name) if os.IsNotExist(err) { return "", errors.New("configuration files on disk are incomplete, missing: " + name) @@ -346,9 +471,9 @@ func loadManagementToken(dir string) (string, error) { func checkCerts(dir string) error { files := []string{ - filepath.Join(dir, CAFileName), - filepath.Join(dir, CertFileName), - filepath.Join(dir, KeyFileName), + filepath.Join(dir, caFileName), + filepath.Join(dir, certFileName), + filepath.Join(dir, keyFileName), } missing := make([]string, 0) @@ -374,28 +499,28 @@ func checkCerts(dir string) error { return fmt.Errorf("configuration files on disk are incomplete, missing: %v", missing) } - cert, key, caCerts, err := LoadCerts(dir) + cert, key, caCerts, err := loadCerts(dir) if err != nil { return fmt.Errorf("failed to load certs from disk: %w", err) } - if err = ValidateTLSCerts(cert, key, caCerts); err != nil { + if err = validateTLSCerts(cert, key, caCerts); err != nil { return fmt.Errorf("invalid certs on disk: %w", err) } return nil } -func LoadCerts(dir string) (cert, key string, caCerts []string, err error) { - certPEMBlock, err := os.ReadFile(filepath.Join(dir, CertFileName)) +func loadCerts(dir string) (cert, key string, caCerts []string, err error) { + certPEMBlock, err := os.ReadFile(filepath.Join(dir, certFileName)) if err != nil { return "", "", nil, err } - keyPEMBlock, err := os.ReadFile(filepath.Join(dir, KeyFileName)) + keyPEMBlock, err := os.ReadFile(filepath.Join(dir, keyFileName)) if err != nil { return "", "", nil, err } - caPEMs, err := os.ReadFile(filepath.Join(dir, CAFileName)) + caPEMs, err := os.ReadFile(filepath.Join(dir, caFileName)) if err != nil { return "", "", nil, err } @@ -434,12 +559,12 @@ func splitCACerts(caPEMs []byte) ([]string, error) { return out, nil } -// ValidateTLSCerts checks that the CA cert, server cert, and key on disk are structurally valid. 
+// validateTLSCerts checks that the CA cert, server cert, and key on disk are structurally valid. // // OPTIMIZE: This could be improved by returning an error if certs are expired or close to expiration. // However, that requires issuing new certs on bootstrap requests, since returning an error // would trigger a re-fetch from HCP. -func ValidateTLSCerts(cert, key string, caCerts []string) error { +func validateTLSCerts(cert, key string, caCerts []string) error { leaf, err := tls.X509KeyPair([]byte(cert), []byte(key)) if err != nil { return errors.New("invalid server certificate or key") @@ -457,25 +582,3 @@ func ValidateTLSCerts(cert, key string, caCerts []string) error { } return nil } - -// LoadManagementToken returns the management token, either by loading it from the persisted -// token config file or by fetching it from HCP if the token file does not exist. -func LoadManagementToken(ctx context.Context, logger hclog.Logger, client hcpclient.Client, dataDir string) (string, error) { - hcpCfgDir := filepath.Join(dataDir, constants.SubDir) - token, err := loadManagementToken(hcpCfgDir) - - if err != nil { - logger.Debug("failed to load management token from local disk, fetching configuration from HCP", "error", err) - var err error - cfg, err := fetchBootstrapConfig(ctx, client, dataDir) - if err != nil { - return "", err - } - logger.Debug("configuration fetched from HCP and saved on local disk") - token = cfg.ManagementToken - } else { - logger.Trace("loaded HCP configuration from local disk") - } - - return token, nil -} diff --git a/agent/hcp/bootstrap/bootstrap_test.go b/agent/hcp/bootstrap/bootstrap_test.go index 3f2f84da87d18..b475223ff8cf3 100644 --- a/agent/hcp/bootstrap/bootstrap_test.go +++ b/agent/hcp/bootstrap/bootstrap_test.go @@ -5,26 +5,303 @@ package bootstrap import ( "context" - "errors" + "crypto/tls" + "crypto/x509" + "fmt" + "net/http/httptest" "os" "path/filepath" "testing" - "time" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" + "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/hcp" hcpclient "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/go-uuid" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" ) +func TestBootstrapConfigLoader(t *testing.T) { + baseLoader := func(source config.Source) (config.LoadResult, error) { + return config.Load(config.LoadOpts{ + DefaultConfig: source, + HCL: []string{ + `server = true`, + `bind_addr = "127.0.0.1"`, + `data_dir = "/tmp/consul-data"`, + }, + }) + } + + bootstrapLoader := func(source config.Source) (config.LoadResult, error) { + return bootstrapConfigLoader(baseLoader, &RawBootstrapConfig{ + ConfigJSON: `{"bootstrap_expect": 8}`, + ManagementToken: "test-token", + })(source) + } + + result, err := bootstrapLoader(nil) + require.NoError(t, err) + + // bootstrap_expect and management token are injected from bootstrap config received from HCP. + require.Equal(t, 8, result.RuntimeConfig.BootstrapExpect) + require.Equal(t, "test-token", result.RuntimeConfig.Cloud.ManagementToken) + + // Response header is always injected from a constant. 
+ require.Equal(t, "x-consul-default-acl-policy", result.RuntimeConfig.HTTPResponseHeaders[accessControlHeaderName]) +} + +func Test_finalizeRuntimeConfig(t *testing.T) { + type testCase struct { + rc *config.RuntimeConfig + cfg *RawBootstrapConfig + verifyFn func(t *testing.T, rc *config.RuntimeConfig) + } + run := func(t *testing.T, tc testCase) { + finalizeRuntimeConfig(tc.rc, tc.cfg) + tc.verifyFn(t, tc.rc) + } + + tt := map[string]testCase{ + "set header if not present": { + rc: &config.RuntimeConfig{}, + cfg: &RawBootstrapConfig{ + ManagementToken: "test-token", + }, + verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { + require.Equal(t, "test-token", rc.Cloud.ManagementToken) + require.Equal(t, "x-consul-default-acl-policy", rc.HTTPResponseHeaders[accessControlHeaderName]) + }, + }, + "append to header if present": { + rc: &config.RuntimeConfig{ + HTTPResponseHeaders: map[string]string{ + accessControlHeaderName: "Content-Encoding", + }, + }, + cfg: &RawBootstrapConfig{ + ManagementToken: "test-token", + }, + verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { + require.Equal(t, "test-token", rc.Cloud.ManagementToken) + require.Equal(t, "Content-Encoding,x-consul-default-acl-policy", rc.HTTPResponseHeaders[accessControlHeaderName]) + }, + }, + } + + for name, tc := range tt { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func boolPtr(value bool) *bool { + return &value +} + +func TestLoadConfig_Persistence(t *testing.T) { + type testCase struct { + // resourceID is the HCP resource ID. If set, a server is considered to be cloud-enabled. + resourceID string + + // devMode indicates whether the loader should not have a data directory. + devMode bool + + // verifyFn issues case-specific assertions. + verifyFn func(t *testing.T, rc *config.RuntimeConfig) + } + + run := func(t *testing.T, tc testCase) { + dir, err := os.MkdirTemp(os.TempDir(), "bootstrap-test-") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(dir) }) + + s := hcp.NewMockHCPServer() + s.AddEndpoint(TestEndpoint()) + + // Use an HTTPS server since that's what the HCP SDK expects for auth. + srv := httptest.NewTLSServer(s) + defer srv.Close() + + caCert, err := x509.ParseCertificate(srv.TLS.Certificates[0].Certificate[0]) + require.NoError(t, err) + + pool := x509.NewCertPool() + pool.AddCert(caCert) + clientTLS := &tls.Config{RootCAs: pool} + + baseOpts := config.LoadOpts{ + HCL: []string{ + `server = true`, + `bind_addr = "127.0.0.1"`, + fmt.Sprintf(`http_config = { response_headers = { %s = "Content-Encoding" } }`, accessControlHeaderName), + fmt.Sprintf(`cloud { client_id="test" client_secret="test" hostname=%q auth_url=%q resource_id=%q }`, + srv.Listener.Addr().String(), srv.URL, tc.resourceID), + }, + } + if tc.devMode { + baseOpts.DevMode = boolPtr(true) + } else { + baseOpts.HCL = append(baseOpts.HCL, fmt.Sprintf(`data_dir = %q`, dir)) + } + + baseLoader := func(source config.Source) (config.LoadResult, error) { + baseOpts.DefaultConfig = source + return config.Load(baseOpts) + } + + ui := cli.NewMockUi() + + // Load initial config to check whether bootstrapping from HCP is enabled. + initial, err := baseLoader(nil) + require.NoError(t, err) + + // Override the client TLS config so that the test server can be trusted. 
+ initial.RuntimeConfig.Cloud.WithTLSConfig(clientTLS) + client, err := hcpclient.NewClient(initial.RuntimeConfig.Cloud) + require.NoError(t, err) + + loader, err := LoadConfig(context.Background(), client, initial.RuntimeConfig.DataDir, baseLoader, ui) + require.NoError(t, err) + + // Load the agent config with the potentially wrapped loader. + fromRemote, err := loader(nil) + require.NoError(t, err) + + // HCP-enabled cases should fetch from HCP on the first run of LoadConfig. + require.Contains(t, ui.OutputWriter.String(), "Fetching configuration from HCP") + + // Run case-specific verification. + tc.verifyFn(t, fromRemote.RuntimeConfig) + + require.Empty(t, fromRemote.RuntimeConfig.ACLInitialManagementToken, + "initial_management token should have been sanitized") + + if tc.devMode { + // Re-running the bootstrap func below isn't relevant to dev mode + // since they don't have a data directory to load data from. + return + } + + // Run LoadConfig again to exercise the logic of loading config from disk. + loader, err = LoadConfig(context.Background(), client, initial.RuntimeConfig.DataDir, baseLoader, ui) + require.NoError(t, err) + + fromDisk, err := loader(nil) + require.NoError(t, err) + + // HCP-enabled cases should fetch from disk on the second run. + require.Contains(t, ui.OutputWriter.String(), "Loaded HCP configuration from local disk") + + // Config loaded from disk should be the same as the one that was initially fetched from the HCP servers. + require.Equal(t, fromRemote.RuntimeConfig, fromDisk.RuntimeConfig) + } + + tt := map[string]testCase{ + "dev mode": { + devMode: true, + + resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "consul.cluster/new-cluster-id", + + verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { + require.Empty(t, rc.DataDir) + + // Dev mode should have persisted certs since they can't be inlined. + require.NotEmpty(t, rc.TLS.HTTPS.CertFile) + require.NotEmpty(t, rc.TLS.HTTPS.KeyFile) + require.NotEmpty(t, rc.TLS.HTTPS.CAFile) + + // Find the temporary directory they got stored in. + dir := filepath.Dir(rc.TLS.HTTPS.CertFile) + + // Ensure we only stored the TLS materials. + entries, err := os.ReadDir(dir) + require.NoError(t, err) + require.Len(t, entries, 3) + + haveFiles := make([]string, 3) + for i, entry := range entries { + haveFiles[i] = entry.Name() + } + + wantFiles := []string{caFileName, certFileName, keyFileName} + require.ElementsMatch(t, wantFiles, haveFiles) + }, + }, + "new cluster": { + resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "consul.cluster/new-cluster-id", + + // New clusters should have received and persisted the whole suite of config. 
+ verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { + dir := filepath.Join(rc.DataDir, subDir) + + entries, err := os.ReadDir(dir) + require.NoError(t, err) + require.Len(t, entries, 6) + + files := []string{ + filepath.Join(dir, configFileName), + filepath.Join(dir, caFileName), + filepath.Join(dir, certFileName), + filepath.Join(dir, keyFileName), + filepath.Join(dir, tokenFileName), + filepath.Join(dir, successFileName), + } + for _, name := range files { + _, err := os.Stat(name) + require.NoError(t, err) + } + + require.Equal(t, filepath.Join(dir, certFileName), rc.TLS.HTTPS.CertFile) + require.Equal(t, filepath.Join(dir, keyFileName), rc.TLS.HTTPS.KeyFile) + require.Equal(t, filepath.Join(dir, caFileName), rc.TLS.HTTPS.CAFile) + + cert, key, caCerts, err := loadCerts(dir) + require.NoError(t, err) + + require.NoError(t, validateTLSCerts(cert, key, caCerts)) + }, + }, + "existing cluster": { + resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + + "consul.cluster/" + TestExistingClusterID, + + // Existing clusters should have only received and persisted the management token. + verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { + dir := filepath.Join(rc.DataDir, subDir) + + entries, err := os.ReadDir(dir) + require.NoError(t, err) + require.Len(t, entries, 3) + + files := []string{ + filepath.Join(dir, tokenFileName), + filepath.Join(dir, successFileName), + filepath.Join(dir, configFileName), + } + for _, name := range files { + _, err := os.Stat(name) + require.NoError(t, err) + } + }, + }, + } + + for name, tc := range tt { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + func Test_loadPersistedBootstrapConfig(t *testing.T) { type expect struct { loaded bool @@ -38,9 +315,11 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { } run := func(t *testing.T, tc testCase) { - dataDir := testutil.TempDir(t, "load-bootstrap-cfg") + dataDir, err := os.MkdirTemp(os.TempDir(), "load-bootstrap-test-") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(dataDir) }) - dir := filepath.Join(dataDir, constants.SubDir) + dir := filepath.Join(dataDir, subDir) // Do some common setup as if we received config from HCP and persisted it to disk. 
require.NoError(t, lib.EnsurePath(dir, true)) @@ -60,7 +339,6 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { var token string if !tc.disableManagementToken { - var err error token, err = uuid.GenerateUUID() require.NoError(t, err) require.NoError(t, persistManagementToken(dir, token)) @@ -72,7 +350,7 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { } ui := cli.NewMockUi() - cfg, loaded := LoadPersistedBootstrapConfig(dataDir, ui) + cfg, loaded := loadPersistedBootstrapConfig(dataDir, ui) require.Equal(t, tc.expect.loaded, loaded, ui.ErrorWriter.String()) if loaded { require.Equal(t, token, cfg.ManagementToken) @@ -129,7 +407,7 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { "new cluster some files": { mutateFn: func(t *testing.T, dir string) { // Remove one of the required files - require.NoError(t, os.Remove(filepath.Join(dir, CertFileName))) + require.NoError(t, os.Remove(filepath.Join(dir, certFileName))) }, expect: expect{ loaded: false, @@ -149,7 +427,7 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { }, "new cluster invalid cert": { mutateFn: func(t *testing.T, dir string) { - name := filepath.Join(dir, CertFileName) + name := filepath.Join(dir, certFileName) require.NoError(t, os.WriteFile(name, []byte("not-a-cert"), 0600)) }, expect: expect{ @@ -159,7 +437,7 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { }, "new cluster invalid CA": { mutateFn: func(t *testing.T, dir string) { - name := filepath.Join(dir, CAFileName) + name := filepath.Join(dir, caFileName) require.NoError(t, os.WriteFile(name, []byte("not-a-ca-cert"), 0600)) }, expect: expect{ @@ -167,10 +445,20 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { warning: "invalid CA certificate", }, }, + "new cluster invalid config flag": { + mutateFn: func(t *testing.T, dir string) { + name := filepath.Join(dir, configFileName) + require.NoError(t, os.WriteFile(name, []byte(`{"not_a_consul_agent_config_field" = "zap"}`), 0600)) + }, + expect: expect{ + loaded: false, + warning: "failed to parse local bootstrap config", + }, + }, "existing cluster invalid token": { existingCluster: true, mutateFn: func(t *testing.T, dir string) { - name := filepath.Join(dir, TokenFileName) + name := filepath.Join(dir, tokenFileName) require.NoError(t, os.WriteFile(name, []byte("not-a-uuid"), 0600)) }, expect: expect{ @@ -186,136 +474,3 @@ func Test_loadPersistedBootstrapConfig(t *testing.T) { }) } } - -func TestFetchBootstrapConfig(t *testing.T) { - type testCase struct { - expectFetchErr error - expectRetry bool - } - - run := func(t *testing.T, tc testCase) { - ui := cli.NewMockUi() - dataDir := testutil.TempDir(t, "fetch-bootstrap-cfg") - clientM := hcpclient.NewMockClient(t) - - if tc.expectFetchErr != nil && tc.expectRetry { - clientM.On("FetchBootstrap", mock.Anything). - Return(nil, tc.expectFetchErr) - } else if tc.expectFetchErr != nil && !tc.expectRetry { - clientM.On("FetchBootstrap", mock.Anything). 
- Return(nil, tc.expectFetchErr).Once() - } else { - validToken, err := uuid.GenerateUUID() - require.NoError(t, err) - clientM.EXPECT().FetchBootstrap(mock.Anything).Return(&hcpclient.BootstrapConfig{ - ManagementToken: validToken, - ConsulConfig: "{}", - }, nil).Once() - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - cfg, err := FetchBootstrapConfig(ctx, clientM, dataDir, ui) - - if tc.expectFetchErr == nil { - require.NoError(t, err) - require.NotNil(t, cfg) - return - } - - require.Error(t, err) - require.Nil(t, cfg) - if tc.expectRetry { - require.ErrorIs(t, err, context.DeadlineExceeded) - } else { - require.ErrorIs(t, err, tc.expectFetchErr) - } - } - - tt := map[string]testCase{ - "success": {}, - "unauthorized": { - expectFetchErr: hcpclient.ErrUnauthorized, - }, - "forbidden": { - expectFetchErr: hcpclient.ErrForbidden, - }, - "retryable fetch error": { - expectFetchErr: errors.New("error"), - expectRetry: true, - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func TestLoadManagementToken(t *testing.T) { - type testCase struct { - skipHCPConfigDir bool - skipTokenFile bool - tokenFileContent string - skipBootstrap bool - } - - validToken, err := uuid.GenerateUUID() - require.NoError(t, err) - - run := func(t *testing.T, tc testCase) { - dataDir := testutil.TempDir(t, "load-management-token") - - hcpCfgDir := filepath.Join(dataDir, constants.SubDir) - if !tc.skipHCPConfigDir { - err := os.Mkdir(hcpCfgDir, 0755) - require.NoError(t, err) - } - - tokenFilePath := filepath.Join(hcpCfgDir, TokenFileName) - if !tc.skipTokenFile { - err := os.WriteFile(tokenFilePath, []byte(tc.tokenFileContent), 0600) - require.NoError(t, err) - } - - clientM := hcpclient.NewMockClient(t) - if !tc.skipBootstrap { - clientM.EXPECT().FetchBootstrap(mock.Anything).Return(&hcpclient.BootstrapConfig{ - ManagementToken: validToken, - ConsulConfig: "{}", - }, nil).Once() - } - - token, err := LoadManagementToken(context.Background(), hclog.NewNullLogger(), clientM, dataDir) - require.NoError(t, err) - require.Equal(t, validToken, token) - - bytes, err := os.ReadFile(tokenFilePath) - require.NoError(t, err) - require.Equal(t, validToken, string(bytes)) - } - - tt := map[string]testCase{ - "token configured": { - skipBootstrap: true, - tokenFileContent: validToken, - }, - "no token configured": { - skipTokenFile: true, - }, - "invalid token configured": { - tokenFileContent: "invalid", - }, - "no hcp-config directory": { - skipHCPConfigDir: true, - skipTokenFile: true, - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} diff --git a/agent/hcp/bootstrap/config-loader/loader.go b/agent/hcp/bootstrap/config-loader/loader.go deleted file mode 100644 index 05e8d19102259..0000000000000 --- a/agent/hcp/bootstrap/config-loader/loader.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -// Package loader handles loading the bootstrap agent config fetched from HCP into -// the agent's config. It must be a separate package from other HCP components -// because it has a dependency on agent/config while other components need to be -// imported and run within the server process in agent/consul and that would create -// a dependency cycle. 
-package loader - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/hcp/bootstrap" - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" -) - -type ConfigLoader func(source config.Source) (config.LoadResult, error) - -// LoadConfig will attempt to load previously-fetched config from disk and fall back to -// fetch from HCP servers if the local data is incomplete. -// It must be passed a (CLI) UI implementation so it can deliver progress -// updates to the user, for example if it is waiting to retry for a long period. -func LoadConfig(ctx context.Context, client hcpclient.Client, dataDir string, loader ConfigLoader, ui bootstrap.UI) (ConfigLoader, error) { - ui.Output("Loading configuration from HCP") - - // See if we have existing config on disk - // - // OPTIMIZE: We could probably be more intelligent about config loading. - // The currently implemented approach is: - // 1. Attempt to load data from disk - // 2. If that fails or the data is incomplete, block indefinitely fetching remote config. - // - // What if instead we had the following flow: - // 1. Attempt to fetch config from HCP. - // 2. If that fails, fall back to data on disk from last fetch. - // 3. If that fails, go into blocking loop to fetch remote config. - // - // This should allow us to more gracefully transition cases like when - // an existing cluster is linked, but then wants to receive TLS materials - // at a later time. Currently, if we observe the existing-cluster marker we - // don't attempt to fetch any additional configuration from HCP. - - cfg, ok := bootstrap.LoadPersistedBootstrapConfig(dataDir, ui) - if ok { - // Persisted bootstrap config exists, but needs to be validated - err := validatePersistedConfig(dataDir) - if err != nil { - ok = false - } - } - if !ok { - ui.Info("Fetching configuration from HCP servers") - - var err error - cfg, err = bootstrap.FetchBootstrapConfig(ctx, client, dataDir, ui) - if err != nil { - return nil, fmt.Errorf("failed to bootstrap from HCP: %w", err) - } - ui.Info("Configuration fetched from HCP and saved on local disk") - - } else { - ui.Info("Loaded HCP configuration from local disk") - - } - - // Create a new loader func to return - newLoader := bootstrapConfigLoader(loader, cfg) - return newLoader, nil -} - -func AddAclPolicyAccessControlHeader(baseLoader ConfigLoader) ConfigLoader { - return func(source config.Source) (config.LoadResult, error) { - res, err := baseLoader(source) - if err != nil { - return res, err - } - - rc := res.RuntimeConfig - - // HTTP response headers are modified for the HCP UI to work. - if rc.HTTPResponseHeaders == nil { - rc.HTTPResponseHeaders = make(map[string]string) - } - prevValue, ok := rc.HTTPResponseHeaders[accessControlHeaderName] - if !ok { - rc.HTTPResponseHeaders[accessControlHeaderName] = accessControlHeaderValue - } else { - rc.HTTPResponseHeaders[accessControlHeaderName] = prevValue + "," + accessControlHeaderValue - } - - return res, nil - } -} - -// bootstrapConfigLoader is a ConfigLoader for passing bootstrap JSON config received from HCP -// to the config.builder. ConfigLoaders are functions used to build an agent's RuntimeConfig -// from various sources like files and flags. This config is contained in the config.LoadResult. -// -// The flow to include bootstrap config from HCP as a loader's data source is as follows: -// -// 1. 
A base ConfigLoader function (baseLoader) is created on agent start, and it sets the input -// source argument as the DefaultConfig. -// -// 2. When a server agent can be configured by HCP that baseLoader is wrapped in this bootstrapConfigLoader. -// -// 3. The bootstrapConfigLoader calls that base loader with the bootstrap JSON config as the -// default source. This data will be merged with other valid sources in the config.builder. -// -// 4. The result of the call to baseLoader() below contains the resulting RuntimeConfig, and we do some -// additional modifications to attach data that doesn't get populated during the build in the config pkg. -// -// Note that since the ConfigJSON is stored as the baseLoader's DefaultConfig, its data is the first -// to be merged by the config.builder and could be overwritten by user-provided values in config files or -// CLI flags. However, values set to RuntimeConfig after the baseLoader call are final. -func bootstrapConfigLoader(baseLoader ConfigLoader, cfg *bootstrap.RawBootstrapConfig) ConfigLoader { - return func(source config.Source) (config.LoadResult, error) { - // Don't allow any further attempts to provide a DefaultSource. This should - // only ever be needed later in client agent AutoConfig code but that should - // be mutually exclusive from this bootstrapping mechanism since this is - // only for servers. If we ever try to change that, this clear failure - // should alert future developers that the assumptions are changing rather - // than quietly not applying the config they expect! - if source != nil { - return config.LoadResult{}, - fmt.Errorf("non-nil config source provided to a loader after HCP bootstrap already provided a DefaultSource") - } - - // Otherwise, just call to the loader we were passed with our own additional - // JSON as the source. - // - // OPTIMIZE: We could check/log whether any fields set by the remote config were overwritten by a user-provided flag. - res, err := baseLoader(config.FileSource{ - Name: "HCP Bootstrap", - Format: "json", - Data: cfg.ConfigJSON, - }) - if err != nil { - return res, fmt.Errorf("failed to load HCP Bootstrap config: %w", err) - } - - finalizeRuntimeConfig(res.RuntimeConfig, cfg) - return res, nil - } -} - -const ( - accessControlHeaderName = "Access-Control-Expose-Headers" - accessControlHeaderValue = "x-consul-default-acl-policy" -) - -// finalizeRuntimeConfig will set additional HCP-specific values that are not -// handled by the config.builder. -func finalizeRuntimeConfig(rc *config.RuntimeConfig, cfg *bootstrap.RawBootstrapConfig) { - rc.Cloud.ManagementToken = cfg.ManagementToken -} - -// validatePersistedConfig attempts to load persisted config to check for errors and basic validity. -// Errors here will raise issues like referencing unsupported config fields. 
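// A minimal sketch of the loader-wrapping pattern described in the flow above,
// using hypothetical names (Loader, withDefaultSource) and plain strings in
// place of the real config.Source/config.LoadResult types; it is a standalone
// illustration of how a base loader is decorated with a fixed default source,
// not the implementation in this patch.

package main

import "fmt"

// Loader mirrors the ConfigLoader shape: accept a default source, return a result.
type Loader func(defaultSource string) (string, error)

// withDefaultSource wraps a base loader, supplying its own default source and
// rejecting any further default, similar in spirit to bootstrapConfigLoader.
func withDefaultSource(base Loader, def string) Loader {
	return func(src string) (string, error) {
		if src != "" {
			return "", fmt.Errorf("default source already provided")
		}
		return base(def)
	}
}

func main() {
	base := func(src string) (string, error) { return "loaded: " + src, nil }
	wrapped := withDefaultSource(base, "hcp-bootstrap-json")
	out, _ := wrapped("")
	fmt.Println(out) // prints "loaded: hcp-bootstrap-json"
}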
-func validatePersistedConfig(dataDir string) error { - filename := filepath.Join(dataDir, constants.SubDir, bootstrap.ConfigFileName) - _, err := config.Load(config.LoadOpts{ - ConfigFiles: []string{filename}, - HCL: []string{ - "server = true", - `bind_addr = "127.0.0.1"`, - fmt.Sprintf("data_dir = %q", dataDir), - }, - ConfigFormat: "json", - }) - if err != nil { - return fmt.Errorf("failed to parse local bootstrap config: %w", err) - } - return nil -} diff --git a/agent/hcp/bootstrap/config-loader/loader_test.go b/agent/hcp/bootstrap/config-loader/loader_test.go deleted file mode 100644 index 8171c6c30f417..0000000000000 --- a/agent/hcp/bootstrap/config-loader/loader_test.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package loader - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "net/http/httptest" - "os" - "path/filepath" - "testing" - - "github.com/mitchellh/cli" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/hcp" - "github.com/hashicorp/consul/agent/hcp/bootstrap" - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/lib" -) - -func TestBootstrapConfigLoader(t *testing.T) { - baseLoader := func(source config.Source) (config.LoadResult, error) { - return config.Load(config.LoadOpts{ - DefaultConfig: source, - HCL: []string{ - `server = true`, - `bind_addr = "127.0.0.1"`, - `data_dir = "/tmp/consul-data"`, - }, - }) - } - - bootstrapLoader := func(source config.Source) (config.LoadResult, error) { - return bootstrapConfigLoader(baseLoader, &bootstrap.RawBootstrapConfig{ - ConfigJSON: `{"bootstrap_expect": 8}`, - ManagementToken: "test-token", - })(source) - } - - result, err := bootstrapLoader(nil) - require.NoError(t, err) - - // bootstrap_expect and management token are injected from bootstrap config received from HCP. 
- require.Equal(t, 8, result.RuntimeConfig.BootstrapExpect) - require.Equal(t, "test-token", result.RuntimeConfig.Cloud.ManagementToken) -} - -func Test_finalizeRuntimeConfig(t *testing.T) { - type testCase struct { - rc *config.RuntimeConfig - cfg *bootstrap.RawBootstrapConfig - verifyFn func(t *testing.T, rc *config.RuntimeConfig) - } - run := func(t *testing.T, tc testCase) { - finalizeRuntimeConfig(tc.rc, tc.cfg) - tc.verifyFn(t, tc.rc) - } - - tt := map[string]testCase{ - "set management token": { - rc: &config.RuntimeConfig{}, - cfg: &bootstrap.RawBootstrapConfig{ - ManagementToken: "test-token", - }, - verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - require.Equal(t, "test-token", rc.Cloud.ManagementToken) - }, - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func Test_AddAclPolicyAccessControlHeader(t *testing.T) { - type testCase struct { - baseLoader ConfigLoader - verifyFn func(t *testing.T, rc *config.RuntimeConfig) - } - run := func(t *testing.T, tc testCase) { - loader := AddAclPolicyAccessControlHeader(tc.baseLoader) - result, err := loader(nil) - require.NoError(t, err) - tc.verifyFn(t, result.RuntimeConfig) - } - - tt := map[string]testCase{ - "append to header if present": { - baseLoader: func(source config.Source) (config.LoadResult, error) { - return config.Load(config.LoadOpts{ - DefaultConfig: config.DefaultSource(), - HCL: []string{ - `server = true`, - `bind_addr = "127.0.0.1"`, - `data_dir = "/tmp/consul-data"`, - fmt.Sprintf(`http_config = { response_headers = { %s = "test" } }`, accessControlHeaderName), - }, - }) - }, - verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - require.Equal(t, "test,x-consul-default-acl-policy", rc.HTTPResponseHeaders[accessControlHeaderName]) - }, - }, - "set header if not present": { - baseLoader: func(source config.Source) (config.LoadResult, error) { - return config.Load(config.LoadOpts{ - DefaultConfig: config.DefaultSource(), - HCL: []string{ - `server = true`, - `bind_addr = "127.0.0.1"`, - `data_dir = "/tmp/consul-data"`, - }, - }) - }, - verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - require.Equal(t, "x-consul-default-acl-policy", rc.HTTPResponseHeaders[accessControlHeaderName]) - }, - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func boolPtr(value bool) *bool { - return &value -} - -func TestLoadConfig_Persistence(t *testing.T) { - type testCase struct { - // resourceID is the HCP resource ID. If set, a server is considered to be cloud-enabled. - resourceID string - - // devMode indicates whether the loader should not have a data directory. - devMode bool - - // verifyFn issues case-specific assertions. - verifyFn func(t *testing.T, rc *config.RuntimeConfig) - } - - run := func(t *testing.T, tc testCase) { - dir, err := os.MkdirTemp(os.TempDir(), "bootstrap-test-") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(dir) }) - - s := hcp.NewMockHCPServer() - s.AddEndpoint(bootstrap.TestEndpoint()) - - // Use an HTTPS server since that's what the HCP SDK expects for auth. 
- srv := httptest.NewTLSServer(s) - defer srv.Close() - - caCert, err := x509.ParseCertificate(srv.TLS.Certificates[0].Certificate[0]) - require.NoError(t, err) - - pool := x509.NewCertPool() - pool.AddCert(caCert) - clientTLS := &tls.Config{RootCAs: pool} - - baseOpts := config.LoadOpts{ - HCL: []string{ - `server = true`, - `bind_addr = "127.0.0.1"`, - fmt.Sprintf(`http_config = { response_headers = { %s = "Content-Encoding" } }`, accessControlHeaderName), - fmt.Sprintf(`cloud { client_id="test" client_secret="test" hostname=%q auth_url=%q resource_id=%q }`, - srv.Listener.Addr().String(), srv.URL, tc.resourceID), - }, - } - if tc.devMode { - baseOpts.DevMode = boolPtr(true) - } else { - baseOpts.HCL = append(baseOpts.HCL, fmt.Sprintf(`data_dir = %q`, dir)) - } - - baseLoader := func(source config.Source) (config.LoadResult, error) { - baseOpts.DefaultConfig = source - return config.Load(baseOpts) - } - - ui := cli.NewMockUi() - - // Load initial config to check whether bootstrapping from HCP is enabled. - initial, err := baseLoader(nil) - require.NoError(t, err) - - // Override the client TLS config so that the test server can be trusted. - initial.RuntimeConfig.Cloud.WithTLSConfig(clientTLS) - client, err := hcpclient.NewClient(initial.RuntimeConfig.Cloud) - require.NoError(t, err) - - loader, err := LoadConfig(context.Background(), client, initial.RuntimeConfig.DataDir, baseLoader, ui) - require.NoError(t, err) - - // Load the agent config with the potentially wrapped loader. - fromRemote, err := loader(nil) - require.NoError(t, err) - - // HCP-enabled cases should fetch from HCP on the first run of LoadConfig. - require.Contains(t, ui.OutputWriter.String(), "Fetching configuration from HCP") - - // Run case-specific verification. - tc.verifyFn(t, fromRemote.RuntimeConfig) - - require.Empty(t, fromRemote.RuntimeConfig.ACLInitialManagementToken, - "initial_management token should have been sanitized") - - if tc.devMode { - // Re-running the bootstrap func below isn't relevant to dev mode - // since they don't have a data directory to load data from. - return - } - - // Run LoadConfig again to exercise the logic of loading config from disk. - loader, err = LoadConfig(context.Background(), client, initial.RuntimeConfig.DataDir, baseLoader, ui) - require.NoError(t, err) - - fromDisk, err := loader(nil) - require.NoError(t, err) - - // HCP-enabled cases should fetch from disk on the second run. - require.Contains(t, ui.OutputWriter.String(), "Loaded HCP configuration from local disk") - - // Config loaded from disk should be the same as the one that was initially fetched from the HCP servers. - require.Equal(t, fromRemote.RuntimeConfig, fromDisk.RuntimeConfig) - } - - tt := map[string]testCase{ - "dev mode": { - devMode: true, - - resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "consul.cluster/new-cluster-id", - - verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - require.Empty(t, rc.DataDir) - - // Dev mode should have persisted certs since they can't be inlined. - require.NotEmpty(t, rc.TLS.HTTPS.CertFile) - require.NotEmpty(t, rc.TLS.HTTPS.KeyFile) - require.NotEmpty(t, rc.TLS.HTTPS.CAFile) - - // Find the temporary directory they got stored in. - dir := filepath.Dir(rc.TLS.HTTPS.CertFile) - - // Ensure we only stored the TLS materials. 
- entries, err := os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, entries, 3) - - haveFiles := make([]string, 3) - for i, entry := range entries { - haveFiles[i] = entry.Name() - } - - wantFiles := []string{bootstrap.CAFileName, bootstrap.CertFileName, bootstrap.KeyFileName} - require.ElementsMatch(t, wantFiles, haveFiles) - }, - }, - "new cluster": { - resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "consul.cluster/new-cluster-id", - - // New clusters should have received and persisted the whole suite of config. - verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - dir := filepath.Join(rc.DataDir, constants.SubDir) - - entries, err := os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, entries, 6) - - files := []string{ - filepath.Join(dir, bootstrap.ConfigFileName), - filepath.Join(dir, bootstrap.CAFileName), - filepath.Join(dir, bootstrap.CertFileName), - filepath.Join(dir, bootstrap.KeyFileName), - filepath.Join(dir, bootstrap.TokenFileName), - filepath.Join(dir, bootstrap.SuccessFileName), - } - for _, name := range files { - _, err := os.Stat(name) - require.NoError(t, err) - } - - require.Equal(t, filepath.Join(dir, bootstrap.CertFileName), rc.TLS.HTTPS.CertFile) - require.Equal(t, filepath.Join(dir, bootstrap.KeyFileName), rc.TLS.HTTPS.KeyFile) - require.Equal(t, filepath.Join(dir, bootstrap.CAFileName), rc.TLS.HTTPS.CAFile) - - cert, key, caCerts, err := bootstrap.LoadCerts(dir) - require.NoError(t, err) - - require.NoError(t, bootstrap.ValidateTLSCerts(cert, key, caCerts)) - }, - }, - "existing cluster": { - resourceID: "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "project/0b9de9a3-8403-4ca6-aba8-fca752f42100/" + - "consul.cluster/" + bootstrap.TestExistingClusterID, - - // Existing clusters should have only received and persisted the management token. 
- verifyFn: func(t *testing.T, rc *config.RuntimeConfig) { - dir := filepath.Join(rc.DataDir, constants.SubDir) - - entries, err := os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, entries, 3) - - files := []string{ - filepath.Join(dir, bootstrap.TokenFileName), - filepath.Join(dir, bootstrap.SuccessFileName), - filepath.Join(dir, bootstrap.ConfigFileName), - } - for _, name := range files { - _, err := os.Stat(name) - require.NoError(t, err) - } - }, - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func TestValidatePersistedConfig(t *testing.T) { - type testCase struct { - configContents string - expectErr string - } - - run := func(t *testing.T, tc testCase) { - dataDir, err := os.MkdirTemp(os.TempDir(), "load-bootstrap-test-") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(dataDir) }) - - dir := filepath.Join(dataDir, constants.SubDir) - require.NoError(t, lib.EnsurePath(dir, true)) - - if tc.configContents != "" { - name := filepath.Join(dir, bootstrap.ConfigFileName) - require.NoError(t, os.WriteFile(name, []byte(tc.configContents), 0600)) - } - - err = validatePersistedConfig(dataDir) - if tc.expectErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expectErr) - } else { - require.NoError(t, err) - } - } - - tt := map[string]testCase{ - "valid": { - configContents: `{"bootstrap_expect": 1, "cloud": {"resource_id": "id"}}`, - }, - "invalid config key": { - configContents: `{"not_a_consul_agent_config_field": "zap"}`, - expectErr: "invalid config key not_a_consul_agent_config_field", - }, - "invalid format": { - configContents: `{"not_json" = "invalid"}`, - expectErr: "invalid character '=' after object key", - }, - "missing configuration file": { - expectErr: "no such file or directory", - }, - } - - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} diff --git a/agent/hcp/bootstrap/constants/constants.go b/agent/hcp/bootstrap/constants/constants.go deleted file mode 100644 index 1f39bf4712dad..0000000000000 --- a/agent/hcp/bootstrap/constants/constants.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -// Package constants declares some constants for use in the HCP bootstrapping -// process. It is in its own package with no other dependencies in order -// to avoid a dependency cycle. -package constants - -const SubDir = "hcp-config" diff --git a/agent/hcp/bootstrap/testing.go b/agent/hcp/bootstrap/testing.go index f073d17183444..a10a5d2bc8add 100644 --- a/agent/hcp/bootstrap/testing.go +++ b/agent/hcp/bootstrap/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package bootstrap diff --git a/agent/hcp/client/client.go b/agent/hcp/client/client.go index 41f3eb97749ed..abafdd9118dc1 100644 --- a/agent/hcp/client/client.go +++ b/agent/hcp/client/client.go @@ -1,20 +1,16 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package client import ( "context" - "errors" "fmt" - "net/http" - "net/url" "strconv" "time" httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" - "golang.org/x/oauth2" hcptelemetry "github.com/hashicorp/hcp-sdk-go/clients/cloud-consul-telemetry-gateway/preview/2023-04-14/client/consul_telemetry_service" hcpgnm "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/client/global_network_manager_service" @@ -35,10 +31,8 @@ const metricsGatewayPath = "/v1/metrics" type Client interface { FetchBootstrap(ctx context.Context) (*BootstrapConfig, error) FetchTelemetryConfig(ctx context.Context) (*TelemetryConfig, error) - GetObservabilitySecret(ctx context.Context) (clientID, clientSecret string, err error) PushServerStatus(ctx context.Context, status *ServerStatus) error DiscoverServers(ctx context.Context) ([]string, error) - GetCluster(ctx context.Context) (*Cluster, error) } type BootstrapConfig struct { @@ -52,12 +46,6 @@ type BootstrapConfig struct { ManagementToken string } -type Cluster struct { - Name string - HCPPortalURL string - AccessLevel *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel -} - type hcpClient struct { hc *httptransport.Runtime cfg config.CloudConfig @@ -129,8 +117,9 @@ func (c *hcpClient) FetchBootstrap(ctx context.Context) (*BootstrapConfig, error resp, err := c.gnm.AgentBootstrapConfig(params, nil) if err != nil { - return nil, decodeError(err) + return nil, err } + return bootstrapConfigFromHCP(resp.Payload), nil } @@ -322,70 +311,3 @@ func (c *hcpClient) DiscoverServers(ctx context.Context) ([]string, error) { return servers, nil } - -func (c *hcpClient) GetCluster(ctx context.Context) (*Cluster, error) { - params := hcpgnm.NewGetClusterParamsWithContext(ctx). - WithID(c.resource.ID). - WithLocationOrganizationID(c.resource.Organization). - WithLocationProjectID(c.resource.Project) - - resp, err := c.gnm.GetCluster(params, nil) - if err != nil { - return nil, decodeError(err) - } - - return clusterFromHCP(resp.Payload), nil -} - -func clusterFromHCP(payload *gnmmod.HashicorpCloudGlobalNetworkManager20220215GetClusterResponse) *Cluster { - return &Cluster{ - Name: payload.Cluster.ID, - AccessLevel: payload.Cluster.ConsulAccessLevel, - HCPPortalURL: payload.Cluster.HcpPortalURL, - } -} - -func decodeError(err error) error { - // Determine the code from the type of error - var code int - switch e := err.(type) { - case *url.Error: - oauthErr, ok := errors.Unwrap(e.Err).(*oauth2.RetrieveError) - if ok { - code = oauthErr.Response.StatusCode - } - case *hcpgnm.AgentBootstrapConfigDefault: - code = e.Code() - case *hcpgnm.GetClusterDefault: - code = e.Code() - } - - // Return specific error for codes if relevant - switch code { - case http.StatusUnauthorized: - return ErrUnauthorized - case http.StatusForbidden: - return ErrForbidden - } - - return err -} - -func (c *hcpClient) GetObservabilitySecret(ctx context.Context) (string, string, error) { - params := hcpgnm.NewGetObservabilitySecretParamsWithContext(ctx). - WithID(c.resource.ID). - WithLocationOrganizationID(c.resource.Organization). 
- WithLocationProjectID(c.resource.Project) - - resp, err := c.gnm.GetObservabilitySecret(params, nil) - if err != nil { - return "", "", err - } - - if len(resp.GetPayload().Keys) == 0 { - return "", "", fmt.Errorf("no observability keys returned for cluster") - } - - key := resp.GetPayload().Keys[len(resp.GetPayload().Keys)-1] - return key.ClientID, key.ClientSecret, nil -} diff --git a/agent/hcp/client/client_test.go b/agent/hcp/client/client_test.go index c9c5bc63f2dd7..5571630ad45a3 100644 --- a/agent/hcp/client/client_test.go +++ b/agent/hcp/client/client_test.go @@ -32,9 +32,6 @@ func (m *mockTGW) QueryRangeBatch(params *hcptelemetry.QueryRangeBatchParams, au return hcptelemetry.NewQueryRangeBatchOK(), nil } func (m *mockTGW) SetTransport(transport runtime.ClientTransport) {} -func (m *mockTGW) GetServiceTopology(params *hcptelemetry.GetServiceTopologyParams, authInfo runtime.ClientAuthInfoWriter, opts ...hcptelemetry.ClientOption) (*hcptelemetry.GetServiceTopologyOK, error) { - return hcptelemetry.NewGetServiceTopologyOK(), nil -} type expectedTelemetryCfg struct { endpoint string diff --git a/agent/hcp/client/errors.go b/agent/hcp/client/errors.go deleted file mode 100644 index 5f07169792477..0000000000000 --- a/agent/hcp/client/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package client - -import "errors" - -var ( - ErrUnauthorized = errors.New("unauthorized") - ErrForbidden = errors.New("forbidden") -) diff --git a/agent/hcp/client/http_client.go b/agent/hcp/client/http_client.go deleted file mode 100644 index 4854f8c022cd2..0000000000000 --- a/agent/hcp/client/http_client.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package client - -import ( - "crypto/tls" - "net/http" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - "golang.org/x/oauth2" -) - -const ( - // HTTP Client config - defaultStreamTimeout = 15 * time.Second - - // Retry config - // TODO: Eventually, we'd like to configure these values dynamically. - defaultRetryWaitMin = 1 * time.Second - defaultRetryWaitMax = 15 * time.Second - // defaultRetryMax is set to 0 to turn off retry functionality, until dynamic configuration is possible. - // This is to circumvent any spikes in load that may cause or exacerbate server-side issues for now. - defaultRetryMax = 0 -) - -// NewHTTPClient configures the retryable HTTP client. -func NewHTTPClient(tlsCfg *tls.Config, source oauth2.TokenSource) *retryablehttp.Client { - tlsTransport := cleanhttp.DefaultPooledTransport() - tlsTransport.TLSClientConfig = tlsCfg - - var transport http.RoundTripper = &oauth2.Transport{ - Base: tlsTransport, - Source: source, - } - - client := &http.Client{ - Transport: transport, - Timeout: defaultStreamTimeout, - } - - retryClient := &retryablehttp.Client{ - HTTPClient: client, - // We already log failed requests elsewhere, we pass a null logger here to avoid redundant logs. 
- Logger: hclog.NewNullLogger(), - RetryWaitMin: defaultRetryWaitMin, - RetryWaitMax: defaultRetryWaitMax, - RetryMax: defaultRetryMax, - CheckRetry: retryablehttp.DefaultRetryPolicy, - Backoff: retryablehttp.DefaultBackoff, - } - - return retryClient -} diff --git a/agent/hcp/client/http_client_test.go b/agent/hcp/client/http_client_test.go deleted file mode 100644 index b8971040892ad..0000000000000 --- a/agent/hcp/client/http_client_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package client - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/stretchr/testify/require" -) - -func TestNewHTTPClient(t *testing.T) { - mockCfg := config.MockCloudCfg{} - mockHCPCfg, err := mockCfg.HCPConfig() - require.NoError(t, err) - - client := NewHTTPClient(mockHCPCfg.APITLSConfig(), mockHCPCfg) - require.NotNil(t, client) - - var req *http.Request - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req = r - })) - _, err = client.Get(srv.URL) - require.NoError(t, err) - require.Equal(t, "Bearer test-token", req.Header.Get("Authorization")) -} diff --git a/agent/hcp/client/metrics_client.go b/agent/hcp/client/metrics_client.go index 47dd7d52800f9..b3c1c6a6b3dc8 100644 --- a/agent/hcp/client/metrics_client.go +++ b/agent/hcp/client/metrics_client.go @@ -6,55 +6,126 @@ package client import ( "bytes" "context" - "errors" "fmt" "io" "net/http" + "time" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-retryablehttp" + hcpcfg "github.com/hashicorp/hcp-sdk-go/config" + "github.com/hashicorp/hcp-sdk-go/resource" colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "golang.org/x/oauth2" "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/agent/hcp/telemetry" + "github.com/hashicorp/consul/version" ) const ( + // HTTP Client config + defaultStreamTimeout = 15 * time.Second + + // Retry config + // TODO: Eventually, we'd like to configure these values dynamically. + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 15 * time.Second + // defaultRetryMax is set to 0 to turn off retry functionality, until dynamic configuration is possible. + // This is to circumvent any spikes in load that may cause or exacerbate server-side issues for now. + defaultRetryMax = 0 + // defaultErrRespBodyLength refers to the max character length of the body on a failure to export metrics. // anything beyond we will truncate. defaultErrRespBodyLength = 100 ) -// MetricsClientProvider provides the retryable HTTP client and headers to use for exporting metrics -// by the metrics client. -type MetricsClientProvider interface { - GetHTTPClient() *retryablehttp.Client - GetHeader() http.Header +// cloudConfig represents cloud config for TLS abstracted in an interface for easy testing. +type CloudConfig interface { + HCPConfig(opts ...hcpcfg.HCPConfigOption) (hcpcfg.HCPConfig, error) + Resource() (resource.Resource, error) } // otlpClient is an implementation of MetricsClient with a retryable http client for retries and to honor throttle. // It also holds default HTTP headers to add to export requests. type otlpClient struct { - provider MetricsClientProvider + client *retryablehttp.Client + header *http.Header } // NewMetricsClient returns a configured MetricsClient. 
// The current implementation uses otlpClient to provide retry functionality. -func NewMetricsClient(ctx context.Context, provider MetricsClientProvider) telemetry.MetricsClient { +func NewMetricsClient(ctx context.Context, cfg CloudConfig) (telemetry.MetricsClient, error) { + if cfg == nil { + return nil, fmt.Errorf("failed to init telemetry client: provide valid cloudCfg (Cloud Configuration for TLS)") + } + + if ctx == nil { + return nil, fmt.Errorf("failed to init telemetry client: provide a valid context") + } + + logger := hclog.FromContext(ctx) + + c, err := newHTTPClient(cfg, logger) + if err != nil { + return nil, fmt.Errorf("failed to init telemetry client: %v", err) + } + + r, err := cfg.Resource() + if err != nil { + return nil, fmt.Errorf("failed to init telemetry client: %v", err) + } + + header := make(http.Header) + header.Set("content-type", "application/x-protobuf") + header.Set("x-hcp-resource-id", r.String()) + header.Set("x-channel", fmt.Sprintf("consul/%s", version.GetHumanVersion())) + return &otlpClient{ - provider: provider, + client: c, + header: &header, + }, nil +} + +// newHTTPClient configures the retryable HTTP client. +func newHTTPClient(cloudCfg CloudConfig, logger hclog.Logger) (*retryablehttp.Client, error) { + hcpCfg, err := cloudCfg.HCPConfig() + if err != nil { + return nil, err } + + tlsTransport := cleanhttp.DefaultPooledTransport() + tlsTransport.TLSClientConfig = hcpCfg.APITLSConfig() + + var transport http.RoundTripper = &oauth2.Transport{ + Base: tlsTransport, + Source: hcpCfg, + } + + client := &http.Client{ + Transport: transport, + Timeout: defaultStreamTimeout, + } + + retryClient := &retryablehttp.Client{ + HTTPClient: client, + Logger: logger.Named("hcp_telemetry_client"), + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: retryablehttp.DefaultRetryPolicy, + Backoff: retryablehttp.DefaultBackoff, + } + + return retryClient, nil } // ExportMetrics is the single method exposed by MetricsClient to export OTLP metrics to the desired HCP endpoint. // The endpoint is configurable as the endpoint can change during periodic refresh of CCM telemetry config. // By configuring the endpoint here, we can re-use the same client and override the endpoint when making a request. 
func (o *otlpClient) ExportMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics, endpoint string) error { - client := o.provider.GetHTTPClient() - if client == nil { - return errors.New("http client not configured") - } - pbRequest := &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, } @@ -68,9 +139,9 @@ func (o *otlpClient) ExportMetrics(ctx context.Context, protoMetrics *metricpb.R if err != nil { return fmt.Errorf("failed to create request: %w", err) } - req.Header = o.provider.GetHeader() + req.Header = *o.header - resp, err := client.Do(req.WithContext(ctx)) + resp, err := o.client.Do(req.WithContext(ctx)) if err != nil { return fmt.Errorf("failed to post metrics: %w", err) } diff --git a/agent/hcp/client/metrics_client_test.go b/agent/hcp/client/metrics_client_test.go index cea6efca3750b..20a5f010ec4cd 100644 --- a/agent/hcp/client/metrics_client_test.go +++ b/agent/hcp/client/metrics_client_test.go @@ -5,6 +5,7 @@ package client import ( "context" + "fmt" "math/rand" "net/http" "net/http/httptest" @@ -15,26 +16,55 @@ import ( metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/protobuf/proto" - "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/consul/version" ) -type mockClientProvider struct { - client *retryablehttp.Client - header *http.Header -} - -func (m *mockClientProvider) GetHTTPClient() *retryablehttp.Client { return m.client } -func (m *mockClientProvider) GetHeader() http.Header { return m.header.Clone() } - -func newMockClientProvider() *mockClientProvider { - header := make(http.Header) - header.Set("content-type", "application/x-protobuf") - - client := retryablehttp.NewClient() +func TestNewMetricsClient(t *testing.T) { + for name, test := range map[string]struct { + wantErr string + cfg CloudConfig + ctx context.Context + }{ + "success": { + cfg: &MockCloudCfg{}, + ctx: context.Background(), + }, + "failsWithoutCloudCfg": { + wantErr: "failed to init telemetry client: provide valid cloudCfg (Cloud Configuration for TLS)", + cfg: nil, + ctx: context.Background(), + }, + "failsWithoutContext": { + wantErr: "failed to init telemetry client: provide a valid context", + cfg: MockCloudCfg{}, + ctx: nil, + }, + "failsHCPConfig": { + wantErr: "failed to init telemetry client", + cfg: MockCloudCfg{ + ConfigErr: fmt.Errorf("test bad hcp config"), + }, + ctx: context.Background(), + }, + "failsBadResource": { + wantErr: "failed to init telemetry client", + cfg: MockCloudCfg{ + ResourceErr: fmt.Errorf("test bad resource"), + }, + ctx: context.Background(), + }, + } { + t.Run(name, func(t *testing.T) { + client, err := NewMetricsClient(test.ctx, test.cfg) + if test.wantErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), test.wantErr) + return + } - return &mockClientProvider{ - header: &header, - client: client, + require.Nil(t, err) + require.NotNil(t, client) + }) } } @@ -53,7 +83,6 @@ func TestExportMetrics(t *testing.T) { wantErr string status int largeBodyError bool - mutateProvider func(*mockClientProvider) }{ "success": { status: http.StatusOK, @@ -67,17 +96,14 @@ func TestExportMetrics(t *testing.T) { wantErr: "failed to export metrics: code 400", largeBodyError: true, }, - "failsWithClientNotConfigured": { - mutateProvider: func(m *mockClientProvider) { - m.client = nil - }, - wantErr: "http client not configured", - }, } { t.Run(name, func(t *testing.T) { randomBody := randStringRunes(1000) srv := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("content-type"), "application/x-protobuf") + require.Equal(t, r.Header.Get("x-hcp-resource-id"), testResourceID) + require.Equal(t, r.Header.Get("x-channel"), fmt.Sprintf("consul/%s", version.GetHumanVersion())) + require.Equal(t, r.Header.Get("Authorization"), "Bearer test-token") body := colpb.ExportMetricsServiceResponse{} bytes, err := proto.Marshal(&body) @@ -95,15 +121,12 @@ func TestExportMetrics(t *testing.T) { })) defer srv.Close() - provider := newMockClientProvider() - if test.mutateProvider != nil { - test.mutateProvider(provider) - } - client := NewMetricsClient(context.Background(), provider) + client, err := NewMetricsClient(context.Background(), MockCloudCfg{}) + require.NoError(t, err) ctx := context.Background() metrics := &metricpb.ResourceMetrics{} - err := client.ExportMetrics(ctx, metrics, srv.URL) + err = client.ExportMetrics(ctx, metrics, srv.URL) if test.wantErr != "" { require.Error(t, err) diff --git a/agent/hcp/client/mock_Client.go b/agent/hcp/client/mock_Client.go index 8e5437c22ddf1..06853ceb86f76 100644 --- a/agent/hcp/client/mock_Client.go +++ b/agent/hcp/client/mock_Client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.2. DO NOT EDIT. +// Code generated by mockery v2.22.1. DO NOT EDIT. package client @@ -25,10 +25,6 @@ func (_m *MockClient) EXPECT() *MockClient_Expecter { func (_m *MockClient) DiscoverServers(ctx context.Context) ([]string, error) { ret := _m.Called(ctx) - if len(ret) == 0 { - panic("no return value specified for DiscoverServers") - } - var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { @@ -83,10 +79,6 @@ func (_c *MockClient_DiscoverServers_Call) RunAndReturn(run func(context.Context func (_m *MockClient) FetchBootstrap(ctx context.Context) (*BootstrapConfig, error) { ret := _m.Called(ctx) - if len(ret) == 0 { - panic("no return value specified for FetchBootstrap") - } - var r0 *BootstrapConfig var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*BootstrapConfig, error)); ok { @@ -141,10 +133,6 @@ func (_c *MockClient_FetchBootstrap_Call) RunAndReturn(run func(context.Context) func (_m *MockClient) FetchTelemetryConfig(ctx context.Context) (*TelemetryConfig, error) { ret := _m.Called(ctx) - if len(ret) == 0 { - panic("no return value specified for FetchTelemetryConfig") - } - var r0 *TelemetryConfig var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*TelemetryConfig, error)); ok { @@ -195,135 +183,10 @@ func (_c *MockClient_FetchTelemetryConfig_Call) RunAndReturn(run func(context.Co return _c } -// GetCluster provides a mock function with given fields: ctx -func (_m *MockClient) GetCluster(ctx context.Context) (*Cluster, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetCluster") - } - - var r0 *Cluster - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*Cluster, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *Cluster); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*Cluster) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockClient_GetCluster_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCluster' -type MockClient_GetCluster_Call struct { - *mock.Call -} - -// GetCluster is a helper method to define mock.On call -// - ctx 
context.Context -func (_e *MockClient_Expecter) GetCluster(ctx interface{}) *MockClient_GetCluster_Call { - return &MockClient_GetCluster_Call{Call: _e.mock.On("GetCluster", ctx)} -} - -func (_c *MockClient_GetCluster_Call) Run(run func(ctx context.Context)) *MockClient_GetCluster_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *MockClient_GetCluster_Call) Return(_a0 *Cluster, _a1 error) *MockClient_GetCluster_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockClient_GetCluster_Call) RunAndReturn(run func(context.Context) (*Cluster, error)) *MockClient_GetCluster_Call { - _c.Call.Return(run) - return _c -} - -// GetObservabilitySecret provides a mock function with given fields: ctx -func (_m *MockClient) GetObservabilitySecret(ctx context.Context) (string, string, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetObservabilitySecret") - } - - var r0 string - var r1 string - var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (string, string, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) string); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(context.Context) string); ok { - r1 = rf(ctx) - } else { - r1 = ret.Get(1).(string) - } - - if rf, ok := ret.Get(2).(func(context.Context) error); ok { - r2 = rf(ctx) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// MockClient_GetObservabilitySecret_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetObservabilitySecret' -type MockClient_GetObservabilitySecret_Call struct { - *mock.Call -} - -// GetObservabilitySecret is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockClient_Expecter) GetObservabilitySecret(ctx interface{}) *MockClient_GetObservabilitySecret_Call { - return &MockClient_GetObservabilitySecret_Call{Call: _e.mock.On("GetObservabilitySecret", ctx)} -} - -func (_c *MockClient_GetObservabilitySecret_Call) Run(run func(ctx context.Context)) *MockClient_GetObservabilitySecret_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *MockClient_GetObservabilitySecret_Call) Return(clientID string, clientSecret string, err error) *MockClient_GetObservabilitySecret_Call { - _c.Call.Return(clientID, clientSecret, err) - return _c -} - -func (_c *MockClient_GetObservabilitySecret_Call) RunAndReturn(run func(context.Context) (string, string, error)) *MockClient_GetObservabilitySecret_Call { - _c.Call.Return(run) - return _c -} - // PushServerStatus provides a mock function with given fields: ctx, status func (_m *MockClient) PushServerStatus(ctx context.Context, status *ServerStatus) error { ret := _m.Called(ctx, status) - if len(ret) == 0 { - panic("no return value specified for PushServerStatus") - } - var r0 error if rf, ok := ret.Get(0).(func(context.Context, *ServerStatus) error); ok { r0 = rf(ctx, status) @@ -363,12 +226,13 @@ func (_c *MockClient_PushServerStatus_Call) RunAndReturn(run func(context.Contex return _c } -// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewMockClient(t interface { +type mockConstructorTestingTNewMockClient interface { mock.TestingT Cleanup(func()) -}) *MockClient { +} + +// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockClient(t mockConstructorTestingTNewMockClient) *MockClient { mock := &MockClient{} mock.Mock.Test(t) diff --git a/agent/hcp/config/mock_CloudConfig.go b/agent/hcp/client/mock_CloudConfig.go similarity index 98% rename from agent/hcp/config/mock_CloudConfig.go rename to agent/hcp/client/mock_CloudConfig.go index e2c6ba0c53be9..2dc523f487af4 100644 --- a/agent/hcp/config/mock_CloudConfig.go +++ b/agent/hcp/client/mock_CloudConfig.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -package config +package client import ( "crypto/tls" diff --git a/agent/hcp/client/telemetry_config.go b/agent/hcp/client/telemetry_config.go index 0745f1b7c6197..4c5b27c58b4f4 100644 --- a/agent/hcp/client/telemetry_config.go +++ b/agent/hcp/client/telemetry_config.go @@ -20,7 +20,7 @@ import ( var ( // defaultMetricFilters is a regex that matches all metric names. - DefaultMetricFilters = regexp.MustCompile(".+") + defaultMetricFilters = regexp.MustCompile(".+") // Validation errors for AgentTelemetryConfigOK response. errMissingPayload = errors.New("missing payload") @@ -29,7 +29,6 @@ var ( errMissingMetricsConfig = errors.New("missing metrics config") errInvalidRefreshInterval = errors.New("invalid refresh interval") errInvalidEndpoint = errors.New("invalid metrics endpoint") - errEmptyEndpoint = errors.New("empty metrics endpoint") ) // TelemetryConfig contains configuration for telemetry data forwarded by Consul servers @@ -44,7 +43,6 @@ type MetricsConfig struct { Labels map[string]string Filters *regexp.Regexp Endpoint *url.URL - Disabled bool } // RefreshConfig contains configuration for the periodic fetch of configuration from HCP. @@ -52,6 +50,11 @@ type RefreshConfig struct { RefreshInterval time.Duration } +// MetricsEnabled returns true if metrics export is enabled, i.e. a valid metrics endpoint exists. +func (t *TelemetryConfig) MetricsEnabled() bool { + return t.MetricsConfig.Endpoint != nil +} + // validateAgentTelemetryConfigPayload ensures the returned payload from HCP is valid. func validateAgentTelemetryConfigPayload(resp *hcptelemetry.AgentTelemetryConfigOK) error { if resp.Payload == nil { @@ -83,7 +86,7 @@ func convertAgentTelemetryResponse(ctx context.Context, resp *hcptelemetry.Agent telemetryConfig := resp.Payload.TelemetryConfig metricsEndpoint, err := convertMetricEndpoint(telemetryConfig.Endpoint, telemetryConfig.Metrics.Endpoint) if err != nil { - return nil, err + return nil, errInvalidEndpoint } metricsFilters := convertMetricFilters(ctx, telemetryConfig.Metrics.IncludeList) @@ -94,7 +97,6 @@ func convertAgentTelemetryResponse(ctx context.Context, resp *hcptelemetry.Agent Endpoint: metricsEndpoint, Labels: metricLabels, Filters: metricsFilters, - Disabled: telemetryConfig.Metrics.Disabled, }, RefreshConfig: &RefreshConfig{ RefreshInterval: refreshInterval, @@ -112,8 +114,9 @@ func convertMetricEndpoint(telemetryEndpoint string, metricsEndpoint string) (*u endpoint = metricsEndpoint } + // If endpoint is empty, server not registered with CCM, no error returned. if endpoint == "" { - return nil, errEmptyEndpoint + return nil, nil } // Endpoint from CTW has no metrics path, so it must be added. 
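// A short standalone sketch of the path handling noted in the comment above,
// assuming a hypothetical endpoint value; it uses net/url and path from the
// standard library to append a "/v1/metrics" gateway path when the configured
// endpoint has none. The endpoint string is made up for illustration.

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Hypothetical CTW endpoint with no metrics path.
	u, err := url.Parse("https://example-telemetry.test")
	if err != nil {
		panic(err)
	}
	u.Path = path.Join(u.Path, "/v1/metrics")
	fmt.Println(u.String()) // https://example-telemetry.test/v1/metrics
}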
@@ -142,7 +145,7 @@ func convertMetricFilters(ctx context.Context, payloadFilters []string) *regexp. if len(validFilters) == 0 { logger.Error("no valid filters") - return DefaultMetricFilters + return defaultMetricFilters } // Combine the valid regex strings with OR. @@ -150,7 +153,7 @@ func convertMetricFilters(ctx context.Context, payloadFilters []string) *regexp. composedRegex, err := regexp.Compile(finalRegex) if err != nil { logger.Error("failed to compile final regex", "error", err) - return DefaultMetricFilters + return defaultMetricFilters } return composedRegex diff --git a/agent/hcp/client/telemetry_config_test.go b/agent/hcp/client/telemetry_config_test.go index d43024400779a..1e6e2cb23a29d 100644 --- a/agent/hcp/client/telemetry_config_test.go +++ b/agent/hcp/client/telemetry_config_test.go @@ -88,6 +88,7 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { resp *consul_telemetry_service.AgentTelemetryConfigOK expectedTelemetryCfg *TelemetryConfig wantErr error + expectedEnabled bool }{ "success": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -114,6 +115,34 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { RefreshInterval: 2 * time.Second, }, }, + expectedEnabled: true, + }, + "successNoEndpoint": { + resp: &consul_telemetry_service.AgentTelemetryConfigOK{ + Payload: &models.HashicorpCloudConsulTelemetry20230414AgentTelemetryConfigResponse{ + TelemetryConfig: &models.HashicorpCloudConsulTelemetry20230414TelemetryConfig{ + Endpoint: "", + Labels: map[string]string{"test": "test"}, + Metrics: &models.HashicorpCloudConsulTelemetry20230414TelemetryMetricsConfig{ + IncludeList: []string{"test", "consul"}, + }, + }, + RefreshConfig: &models.HashicorpCloudConsulTelemetry20230414RefreshConfig{ + RefreshInterval: "2s", + }, + }, + }, + expectedTelemetryCfg: &TelemetryConfig{ + MetricsConfig: &MetricsConfig{ + Endpoint: nil, + Labels: map[string]string{"test": "test"}, + Filters: validTestFilters, + }, + RefreshConfig: &RefreshConfig{ + RefreshInterval: 2 * time.Second, + }, + }, + expectedEnabled: false, }, "successBadFilters": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -134,12 +163,13 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { MetricsConfig: &MetricsConfig{ Endpoint: validTestURL, Labels: map[string]string{"test": "test"}, - Filters: DefaultMetricFilters, + Filters: defaultMetricFilters, }, RefreshConfig: &RefreshConfig{ RefreshInterval: 2 * time.Second, }, }, + expectedEnabled: true, }, "errorsWithInvalidRefreshInterval": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -179,6 +209,7 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { } require.NoError(t, err) require.Equal(t, tc.expectedTelemetryCfg, telemetryCfg) + require.Equal(t, tc.expectedEnabled, telemetryCfg.MetricsEnabled()) }) } } @@ -200,10 +231,10 @@ func TestConvertMetricEndpoint(t *testing.T) { override: "https://override.com", expected: "https://override.com/v1/metrics", }, - "errorWithEmptyEndpoints": { + "noErrorWithEmptyEndpoints": { endpoint: "", override: "", - wantErr: errEmptyEndpoint, + expected: "", }, "errorWithInvalidURL": { endpoint: " ", @@ -221,6 +252,12 @@ func TestConvertMetricEndpoint(t *testing.T) { return } + if tc.expected == "" { + require.Nil(t, u) + require.NoError(t, err) + return + } + require.NotNil(t, u) require.NoError(t, err) require.Equal(t, tc.expected, u.String()) @@ -240,13 +277,13 @@ func TestConvertMetricFilters(t *testing.T) { }{ "badFilterRegex": { filters: []string{"(*LF)"}, - expectedRegexString: 
DefaultMetricFilters.String(), + expectedRegexString: defaultMetricFilters.String(), matches: []string{"consul.raft.peers", "consul.mem.heap_size"}, wantMatch: true, }, "emptyRegex": { filters: []string{}, - expectedRegexString: DefaultMetricFilters.String(), + expectedRegexString: defaultMetricFilters.String(), matches: []string{"consul.raft.peers", "consul.mem.heap_size"}, wantMatch: true, }, diff --git a/agent/hcp/config/config.go b/agent/hcp/config/config.go index 420af129adda6..319c39e40e94c 100644 --- a/agent/hcp/config/config.go +++ b/agent/hcp/config/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package config @@ -11,13 +11,6 @@ import ( "github.com/hashicorp/hcp-sdk-go/resource" ) -// CloudConfigurer abstracts the cloud config methods needed to connect to HCP -// in an interface for easier testing. -type CloudConfigurer interface { - HCPConfig(opts ...hcpcfg.HCPConfigOption) (hcpcfg.HCPConfig, error) - Resource() (resource.Resource, error) -} - // CloudConfig defines configuration for connecting to HCP services type CloudConfig struct { ResourceID string @@ -46,8 +39,6 @@ func (c *CloudConfig) Resource() (resource.Resource, error) { return resource.FromString(c.ResourceID) } -// HCPConfig returns a configuration to use with the HCP SDK. It assumes that the environment -// variables for the HCP configuration have already been loaded and set in the CloudConfig. func (c *CloudConfig) HCPConfig(opts ...hcpcfg.HCPConfigOption) (hcpcfg.HCPConfig, error) { if c.TLSConfig == nil { c.TLSConfig = &tls.Config{} @@ -64,59 +55,6 @@ func (c *CloudConfig) HCPConfig(opts ...hcpcfg.HCPConfigOption) (hcpcfg.HCPConfi if c.ScadaAddress != "" { opts = append(opts, hcpcfg.WithSCADA(c.ScadaAddress, c.TLSConfig)) } - opts = append(opts, hcpcfg.WithoutBrowserLogin()) + opts = append(opts, hcpcfg.FromEnv(), hcpcfg.WithoutBrowserLogin()) return hcpcfg.NewHCPConfig(opts...) } - -// IsConfigured returns whether the cloud configuration has been set either -// in the configuration file or via environment variables. -func (c *CloudConfig) IsConfigured() bool { - return c.ResourceID != "" && c.ClientID != "" && c.ClientSecret != "" -} - -// Merge returns a cloud configuration that is the combined the values of -// two configurations. -func Merge(o CloudConfig, n CloudConfig) CloudConfig { - c := o - if n.ResourceID != "" { - c.ResourceID = n.ResourceID - } - - if n.ClientID != "" { - c.ClientID = n.ClientID - } - - if n.ClientSecret != "" { - c.ClientSecret = n.ClientSecret - } - - if n.Hostname != "" { - c.Hostname = n.Hostname - } - - if n.AuthURL != "" { - c.AuthURL = n.AuthURL - } - - if n.ScadaAddress != "" { - c.ScadaAddress = n.ScadaAddress - } - - if n.ManagementToken != "" { - c.ManagementToken = n.ManagementToken - } - - if n.TLSConfig != nil { - c.TLSConfig = n.TLSConfig - } - - if n.NodeID != "" { - c.NodeID = n.NodeID - } - - if n.NodeName != "" { - c.NodeName = n.NodeName - } - - return c -} diff --git a/agent/hcp/config/config_test.go b/agent/hcp/config/config_test.go deleted file mode 100644 index ca07d4d94eaae..0000000000000 --- a/agent/hcp/config/config_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package config - -import ( - "crypto/tls" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMerge(t *testing.T) { - oldCfg := CloudConfig{ - ResourceID: "old-resource-id", - ClientID: "old-client-id", - ClientSecret: "old-client-secret", - Hostname: "old-hostname", - AuthURL: "old-auth-url", - ScadaAddress: "old-scada-address", - ManagementToken: "old-token", - TLSConfig: &tls.Config{ - ServerName: "old-server-name", - }, - NodeID: "old-node-id", - NodeName: "old-node-name", - } - - newCfg := CloudConfig{ - ResourceID: "new-resource-id", - ClientID: "new-client-id", - ClientSecret: "new-client-secret", - Hostname: "new-hostname", - AuthURL: "new-auth-url", - ScadaAddress: "new-scada-address", - ManagementToken: "new-token", - TLSConfig: &tls.Config{ - ServerName: "new-server-name", - }, - NodeID: "new-node-id", - NodeName: "new-node-name", - } - - for name, tc := range map[string]struct { - newCfg CloudConfig - expectedCfg CloudConfig - }{ - "Empty": { - newCfg: CloudConfig{}, - expectedCfg: oldCfg, - }, - "All": { - newCfg: newCfg, - expectedCfg: newCfg, - }, - "Partial": { - newCfg: CloudConfig{ - ResourceID: newCfg.ResourceID, - ClientID: newCfg.ClientID, - ClientSecret: newCfg.ClientSecret, - ManagementToken: newCfg.ManagementToken, - }, - expectedCfg: CloudConfig{ - ResourceID: newCfg.ResourceID, - ClientID: newCfg.ClientID, - ClientSecret: newCfg.ClientSecret, - ManagementToken: newCfg.ManagementToken, - Hostname: oldCfg.Hostname, - AuthURL: oldCfg.AuthURL, - ScadaAddress: oldCfg.ScadaAddress, - TLSConfig: oldCfg.TLSConfig, - NodeID: oldCfg.NodeID, - NodeName: oldCfg.NodeName, - }, - }, - } { - t.Run(name, func(t *testing.T) { - merged := Merge(oldCfg, tc.newCfg) - require.Equal(t, tc.expectedCfg, merged) - }) - } -} diff --git a/agent/hcp/deps.go b/agent/hcp/deps.go index 05e65708b6d14..ab8288e344d11 100644 --- a/agent/hcp/deps.go +++ b/agent/hcp/deps.go @@ -1,78 +1,86 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package hcp import ( "context" "fmt" + "time" "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "go.opentelemetry.io/otel" - - "github.com/hashicorp/consul/agent/hcp/client" + hcpclient "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/hcp/scada" "github.com/hashicorp/consul/agent/hcp/telemetry" + "github.com/hashicorp/go-hclog" ) // Deps contains the interfaces that the rest of Consul core depends on for HCP integration. 
type Deps struct { - Config config.CloudConfig - Provider scada.Provider - Sink metrics.ShutdownSink - TelemetryProvider *hcpProviderImpl - DataDir string + Client hcpclient.Client + Provider scada.Provider + Sink metrics.MetricSink } -func NewDeps(cfg config.CloudConfig, logger hclog.Logger, dataDir string) (Deps, error) { +func NewDeps(cfg config.CloudConfig, logger hclog.Logger) (Deps, error) { ctx := context.Background() ctx = hclog.WithContext(ctx, logger) - provider, err := scada.New(logger.Named("scada")) + client, err := hcpclient.NewClient(cfg) if err != nil { - return Deps{}, fmt.Errorf("failed to init scada: %w", err) + return Deps{}, fmt.Errorf("failed to init client: %w", err) } - metricsProvider := NewHCPProvider(ctx) + provider, err := scada.New(cfg, logger.Named("scada")) if err != nil { - logger.Error("failed to init HCP metrics provider", "error", err) - return Deps{}, fmt.Errorf("failed to init HCP metrics provider: %w", err) + return Deps{}, fmt.Errorf("failed to init scada: %w", err) } - metricsClient := client.NewMetricsClient(ctx, metricsProvider) + metricsClient, err := hcpclient.NewMetricsClient(ctx, &cfg) + if err != nil { + logger.Error("failed to init metrics client", "error", err) + return Deps{}, fmt.Errorf("failed to init metrics client: %w", err) + } - sink, err := newSink(ctx, metricsClient, metricsProvider) + sink, err := sink(ctx, client, metricsClient) if err != nil { // Do not prevent server start if sink init fails, only log error. logger.Error("failed to init sink", "error", err) } return Deps{ - Config: cfg, - Provider: provider, - Sink: sink, - TelemetryProvider: metricsProvider, - DataDir: dataDir, + Client: client, + Provider: provider, + Sink: sink, }, nil } -// newSink initializes an OTELSink which forwards Consul metrics to HCP. +// sink initializes an OTELSink which forwards Consul metrics to HCP. +// The sink is only initialized if the server is registered with the management plane (CCM). // This step should not block server initialization, errors are returned, only to be logged. -func newSink( +func sink( ctx context.Context, + hcpClient hcpclient.Client, metricsClient telemetry.MetricsClient, - cfgProvider *hcpProviderImpl, -) (metrics.ShutdownSink, error) { - logger := hclog.FromContext(ctx) +) (metrics.MetricSink, error) { + logger := hclog.FromContext(ctx).Named("sink") + reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + telemetryCfg, err := hcpClient.FetchTelemetryConfig(reqCtx) + if err != nil { + return nil, fmt.Errorf("failed to fetch telemetry config: %w", err) + } + + if !telemetryCfg.MetricsEnabled() { + return nil, nil + } - // Set the global OTEL error handler. Without this, on any failure to publish metrics in - // otelExporter.Export, the default OTEL handler logs to stderr without the formatting or group - // that hclog provides. 
Here we override that global error handler once so logs are - // in the standard format and include "hcp" in the group name like: - // 2024-02-06T22:35:19.072Z [ERROR] agent.hcp: failed to export metrics: failed to export metrics: code 404: 404 page not found - otel.SetErrorHandler(&otelErrorHandler{logger: logger}) + cfgProvider, err := NewHCPProvider(ctx, hcpClient, telemetryCfg) + if err != nil { + return nil, fmt.Errorf("failed to init config provider: %w", err) + } reader := telemetry.NewOTELReader(metricsClient, cfgProvider) sinkOpts := &telemetry.OTELSinkOpts{ @@ -82,18 +90,10 @@ func newSink( sink, err := telemetry.NewOTELSink(ctx, sinkOpts) if err != nil { - return nil, fmt.Errorf("failed to create OTELSink: %w", err) + return nil, fmt.Errorf("failed create OTELSink: %w", err) } logger.Debug("initialized HCP metrics sink") return sink, nil } - -type otelErrorHandler struct { - logger hclog.Logger -} - -func (o *otelErrorHandler) Handle(err error) { - o.logger.Error(err.Error()) -} diff --git a/agent/hcp/deps_test.go b/agent/hcp/deps_test.go index 84a34dd697c23..101fe076cb697 100644 --- a/agent/hcp/deps_test.go +++ b/agent/hcp/deps_test.go @@ -5,10 +5,16 @@ package hcp import ( "context" + "fmt" + "net/url" + "regexp" "testing" + "time" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/agent/hcp/telemetry" ) @@ -18,11 +24,79 @@ type mockMetricsClient struct { func TestSink(t *testing.T) { t.Parallel() + for name, test := range map[string]struct { + expect func(*client.MockClient) + wantErr string + expectedSink bool + }{ + "success": { + expect: func(mockClient *client.MockClient) { + u, _ := url.Parse("https://test.com/v1/metrics") + filters, _ := regexp.Compile("test") + mt := mockTelemetryConfig(1*time.Second, u, filters) + mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) + }, + expectedSink: true, + }, + "noSinkWhenFetchTelemetryConfigFails": { + expect: func(mockClient *client.MockClient) { + mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, fmt.Errorf("fetch failed")) + }, + wantErr: "failed to fetch telemetry config", + }, + "noSinkWhenServerNotRegisteredWithCCM": { + expect: func(mockClient *client.MockClient) { + mt := mockTelemetryConfig(1*time.Second, nil, nil) + mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) + }, + }, + "noSinkWhenTelemetryConfigProviderInitFails": { + expect: func(mockClient *client.MockClient) { + u, _ := url.Parse("https://test.com/v1/metrics") + // Bad refresh interval forces ConfigProvider creation failure. 
+ mt := mockTelemetryConfig(0*time.Second, u, nil) + mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) + }, + wantErr: "failed to init config provider", + }, + } { + test := test + t.Run(name, func(t *testing.T) { + t.Parallel() + c := client.NewMockClient(t) + mc := mockMetricsClient{} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s, err := newSink(ctx, mockMetricsClient{}, &hcpProviderImpl{}) + test.expect(c) + ctx := context.Background() - require.NotNil(t, s) - require.NoError(t, err) + s, err := sink(ctx, c, mc) + + if test.wantErr != "" { + require.NotNil(t, err) + require.Contains(t, err.Error(), test.wantErr) + require.Nil(t, s) + return + } + + if !test.expectedSink { + require.Nil(t, s) + require.Nil(t, err) + return + } + + require.NotNil(t, s) + }) + } +} + +func mockTelemetryConfig(refreshInterval time.Duration, metricsEndpoint *url.URL, filters *regexp.Regexp) *client.TelemetryConfig { + return &client.TelemetryConfig{ + MetricsConfig: &client.MetricsConfig{ + Endpoint: metricsEndpoint, + Filters: filters, + }, + RefreshConfig: &client.RefreshConfig{ + RefreshInterval: refreshInterval, + }, + } } diff --git a/agent/hcp/discover/discover.go b/agent/hcp/discover/discover.go index 981400c38b478..12024b7dd6a0b 100644 --- a/agent/hcp/discover/discover.go +++ b/agent/hcp/discover/discover.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package discover diff --git a/agent/hcp/link_watch.go b/agent/hcp/link_watch.go deleted file mode 100644 index b89ba942e4655..0000000000000 --- a/agent/hcp/link_watch.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "time" - - "github.com/hashicorp/go-hclog" - - hcpctl "github.com/hashicorp/consul/internal/hcp" - "github.com/hashicorp/consul/lib/retry" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type LinkEventHandler = func(context.Context, hclog.Logger, *pbresource.WatchEvent) - -func handleLinkEvents(ctx context.Context, logger hclog.Logger, watchClient pbresource.ResourceService_WatchListClient, linkEventHandler LinkEventHandler) { - for { - select { - case <-ctx.Done(): - logger.Debug("context canceled, exiting") - return - default: - watchEvent, err := watchClient.Recv() - - if err != nil { - logger.Error("error receiving link watch event", "error", err) - return - } - - linkEventHandler(ctx, logger, watchEvent) - } - } -} - -func RunHCPLinkWatcher( - ctx context.Context, logger hclog.Logger, client pbresource.ResourceServiceClient, linkEventHandler LinkEventHandler, -) { - errorBackoff := &retry.Waiter{ - MinFailures: 10, - MinWait: 0, - MaxWait: 1 * time.Minute, - } - for { - select { - case <-ctx.Done(): - logger.Debug("context canceled, exiting") - return - default: - watchClient, err := client.WatchList( - ctx, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }, - ) - if err != nil { - logger.Error("failed to create watch on Link", "error", err) - errorBackoff.Wait(ctx) - continue - } - errorBackoff.Reset() - handleLinkEvents(ctx, logger, watchClient, linkEventHandler) - } - } -} diff --git a/agent/hcp/link_watch_test.go b/agent/hcp/link_watch_test.go deleted file mode 100644 index 22d2204a817d1..0000000000000 --- a/agent/hcp/link_watch_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 
HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "github.com/hashicorp/go-hclog" - - mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// This tests that when we get a watch event from the Recv call, we get that same event on the -// output channel, then we -func TestLinkWatcher_Ok(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - testWatchEvent := &pbresource.WatchEvent{} - mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t) - mockWatchListClient.EXPECT().Recv().Return(testWatchEvent, nil) - - eventCh := make(chan *pbresource.WatchEvent) - mockLinkHandler := func(_ context.Context, _ hclog.Logger, event *pbresource.WatchEvent) { - eventCh <- event - } - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).Return(mockWatchListClient, nil) - - go RunHCPLinkWatcher(ctx, hclog.Default(), client, mockLinkHandler) - - // Assert that the link handler is called with the testWatchEvent - receivedWatchEvent := <-eventCh - require.Equal(t, testWatchEvent, receivedWatchEvent) -} - -func TestLinkWatcher_RecvError(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - // Our mock WatchListClient will simulate 5 errors, then will cancel the context. - // We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries) - // before exiting due to context cancellation. - mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t) - numFailures := 5 - failures := 0 - mockWatchListClient.EXPECT().Recv().RunAndReturn(func() (*pbresource.WatchEvent, error) { - if failures < numFailures { - failures++ - return nil, errors.New("unexpectedError") - } - defer cancel() - return &pbresource.WatchEvent{}, nil - }) - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).Return(mockWatchListClient, nil).Times(numFailures + 1) - - RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {}) -} - -func TestLinkWatcher_WatchListError(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - // Our mock WatchList will simulate 5 errors, then will cancel the context. - // We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries) - // before exiting due to context cancellation. 
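[Editor's note] Both error-path tests here exercise the retry.Waiter loop inside the RunHCPLinkWatcher being removed in this diff. Stripped of the resource-service specifics, the pattern under test looks roughly like the sketch below; openWatch is an illustrative stand-in for the WatchList/Recv calls and is assumed to return an error only when the watch could not be created.

package linkwatchsketch

import (
	"context"
	"time"

	"github.com/hashicorp/consul/lib/retry"
)

// watchWithBackoff keeps re-establishing a watch until ctx is canceled.
// Failed attempts to create the watch are spaced out by the waiter, and a
// successful attempt resets it, which is why the tests expect
// numFailures+1 WatchList calls before the context is canceled.
func watchWithBackoff(ctx context.Context, openWatch func(context.Context) error) {
	waiter := &retry.Waiter{
		MinFailures: 10,
		MinWait:     0,
		MaxWait:     1 * time.Minute,
	}
	for {
		select {
		case <-ctx.Done():
			return
		default:
			if err := openWatch(ctx); err != nil {
				// Could not create the watch; back off before retrying.
				waiter.Wait(ctx)
				continue
			}
			// The watch was created (and has since ended); clear the backoff
			// so the next attempt is immediate.
			waiter.Reset()
		}
	}
}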
- numFailures := 5 - failures := 0 - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).RunAndReturn(func(_ context.Context, _ *pbresource.WatchListRequest, _ ...grpc.CallOption) (pbresource.ResourceService_WatchListClient, error) { - if failures < numFailures { - failures++ - return nil, errors.New("unexpectedError") - } - defer cancel() - return mockpbresource.NewResourceService_WatchListClient(t), nil - }).Times(numFailures + 1) - - RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {}) -} diff --git a/agent/hcp/manager.go b/agent/hcp/manager.go index 8fb1ac67c8528..0dc9db95da295 100644 --- a/agent/hcp/manager.go +++ b/agent/hcp/manager.go @@ -1,20 +1,16 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package hcp import ( "context" - "reflect" "sync" "time" - "github.com/hashicorp/go-hclog" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/agent/hcp/scada" "github.com/hashicorp/consul/lib" + "github.com/hashicorp/go-hclog" ) var ( @@ -22,21 +18,12 @@ var ( defaultManagerMaxInterval = 75 * time.Minute ) -var _ Manager = (*HCPManager)(nil) - type ManagerConfig struct { - Client hcpclient.Client - CloudConfig config.CloudConfig - SCADAProvider scada.Provider - TelemetryProvider TelemetryProvider - - StatusFn StatusCallback - // Idempotent function to upsert the HCP management token. This will be called periodically in - // the manager's main loop. - ManagementTokenUpserterFn ManagementTokenUpserter - ManagementTokenDeleterFn ManagementTokenDeleter - MinInterval time.Duration - MaxInterval time.Duration + Client hcpclient.Client + + StatusFn StatusCallback + MinInterval time.Duration + MaxInterval time.Duration Logger hclog.Logger } @@ -62,36 +49,24 @@ func (cfg *ManagerConfig) nextHeartbeat() time.Duration { } type StatusCallback func(context.Context) (hcpclient.ServerStatus, error) -type ManagementTokenUpserter func(name, secretId string) error -type ManagementTokenDeleter func(secretId string) error - -//go:generate mockery --name Manager --with-expecter --inpackage -type Manager interface { - Start(context.Context) error - Stop() error - GetCloudConfig() config.CloudConfig - UpdateConfig(hcpclient.Client, config.CloudConfig) -} -type HCPManager struct { +type Manager struct { logger hclog.Logger - running bool - runLock sync.RWMutex - cfg ManagerConfig cfgMu sync.RWMutex updateCh chan struct{} - stopCh chan struct{} // testUpdateSent is set by unit tests to signal when the manager's status update has triggered testUpdateSent chan struct{} } -// NewManager returns a Manager initialized with the given configuration. -func NewManager(cfg ManagerConfig) *HCPManager { - return &HCPManager{ +// NewManager returns an initialized Manager with a zero configuration. It won't +// do anything until UpdateConfig is called with a config that provides +// credentials to contact HCP. +func NewManager(cfg ManagerConfig) *Manager { + return &Manager{ logger: cfg.Logger, cfg: cfg, @@ -99,162 +74,66 @@ func NewManager(cfg ManagerConfig) *HCPManager { } } -// Start executes the logic for connecting to HCP and sending periodic server updates. If the -// manager has been previously started, it will not start again. 
-func (m *HCPManager) Start(ctx context.Context) error { - // Check if the manager has already started - changed := m.setRunning(true) - if !changed { - m.logger.Trace("HCP manager already started") - return nil - } - +// Run executes the Manager it's designed to be run in its own goroutine for +// the life of a server agent. It should be run even if HCP is not configured +// yet for servers since a config update might configure it later and +// UpdateConfig called. It will effectively do nothing if there are no HCP +// credentials set other than wait for some to be added. +func (m *Manager) Run(ctx context.Context) { var err error - m.logger.Info("HCP manager starting") - - // Update and start the SCADA provider - err = m.startSCADAProvider() - if err != nil { - m.logger.Error("failed to start scada provider", "error", err) - m.setRunning(false) - return err - } - - // Update and start the telemetry provider to enable the HCP metrics sink - if err := m.startTelemetryProvider(ctx); err != nil { - m.logger.Error("failed to update telemetry config provider", "error", err) - m.setRunning(false) - return err - } + m.logger.Debug("HCP manager starting") // immediately send initial update select { case <-ctx.Done(): - m.setRunning(false) - return nil - case <-m.stopCh: - return nil + return case <-m.updateCh: // empty the update chan if there is a queued update to prevent repeated update in main loop err = m.sendUpdate() - if err != nil { - m.setRunning(false) - return err - } default: err = m.sendUpdate() - if err != nil { - m.setRunning(false) - return err - } } // main loop - go func() { - for { - m.cfgMu.RLock() - cfg := m.cfg - m.cfgMu.RUnlock() - - // Check for configured management token from HCP and upsert it if found - if hcpManagement := cfg.CloudConfig.ManagementToken; len(hcpManagement) > 0 { - if cfg.ManagementTokenUpserterFn != nil { - upsertTokenErr := cfg.ManagementTokenUpserterFn("HCP Management Token", hcpManagement) - if upsertTokenErr != nil { - m.logger.Error("failed to upsert HCP management token", "err", upsertTokenErr) - } - } - } - - nextUpdate := cfg.nextHeartbeat() - if err != nil { - m.logger.Error("failed to send server status to HCP", "err", err, "next_heartbeat", nextUpdate.String()) - } - - select { - case <-ctx.Done(): - m.setRunning(false) - return - - case <-m.stopCh: - return - - case <-m.updateCh: - err = m.sendUpdate() - - case <-time.After(nextUpdate): - err = m.sendUpdate() - } + for { + m.cfgMu.RLock() + cfg := m.cfg + m.cfgMu.RUnlock() + nextUpdate := cfg.nextHeartbeat() + if err != nil { + m.logger.Error("failed to send server status to HCP", "err", err, "next_heartbeat", nextUpdate.String()) } - }() - - return err -} -func (m *HCPManager) startSCADAProvider() error { - provider := m.cfg.SCADAProvider - if provider == nil { - return nil - } + select { + case <-ctx.Done(): + return - // Update the SCADA provider configuration with HCP configurations - m.logger.Debug("updating scada provider with HCP configuration") - err := provider.UpdateHCPConfig(m.cfg.CloudConfig) - if err != nil { - m.logger.Error("failed to update scada provider with HCP configuration", "err", err) - return err - } + case <-m.updateCh: + err = m.sendUpdate() - // Update the SCADA provider metadata - provider.UpdateMeta(map[string]string{ - "consul_server_id": string(m.cfg.CloudConfig.NodeID), - }) - - // Start the SCADA provider - err = provider.Start() - if err != nil { - return err - } - return nil -} - -func (m *HCPManager) startTelemetryProvider(ctx context.Context) error { - if 
m.cfg.TelemetryProvider == nil || reflect.ValueOf(m.cfg.TelemetryProvider).IsNil() { - return nil + case <-time.After(nextUpdate): + err = m.sendUpdate() + } } - - m.cfg.TelemetryProvider.Start(ctx, &HCPProviderCfg{ - HCPClient: m.cfg.Client, - HCPConfig: &m.cfg.CloudConfig, - }) - - return nil } -func (m *HCPManager) GetCloudConfig() config.CloudConfig { - m.cfgMu.RLock() - defer m.cfgMu.RUnlock() - - return m.cfg.CloudConfig -} - -func (m *HCPManager) UpdateConfig(client hcpclient.Client, cloudCfg config.CloudConfig) { +func (m *Manager) UpdateConfig(cfg ManagerConfig) { m.cfgMu.Lock() - // Save original values - originalCfg := m.cfg.CloudConfig - originalClient := m.cfg.Client - - // Update with new values - m.cfg.Client = client - m.cfg.CloudConfig = cloudCfg - m.cfgMu.Unlock() - - // Send update if already running and values were updated - if m.isRunning() && (originalClient != client || originalCfg != cloudCfg) { - m.SendUpdate() + defer m.cfgMu.Unlock() + old := m.cfg + m.cfg = cfg + if old.enabled() || cfg.enabled() { + // Only log about this if cloud is actually configured or it would be + // confusing. We check both old and new in case we are disabling cloud or + // enabling it or just updating it. + m.logger.Info("updated HCP configuration") } + + // Send a new status update since we might have just gotten connection details + // for the first time. + m.SendUpdate() } -func (m *HCPManager) SendUpdate() { +func (m *Manager) SendUpdate() { m.logger.Debug("HCP triggering status update") select { case m.updateCh <- struct{}{}: @@ -272,7 +151,7 @@ func (m *HCPManager) SendUpdate() { // and a "isRetrying" state or something so that we attempt to send update, but // then fetch fresh info on each attempt to send so if we are already in a retry // backoff a new push is a no-op. -func (m *HCPManager) sendUpdate() error { +func (m *Manager) sendUpdate() error { m.cfgMu.RLock() cfg := m.cfg m.cfgMu.RUnlock() @@ -298,67 +177,5 @@ func (m *HCPManager) sendUpdate() error { return err } - return cfg.Client.PushServerStatus(ctx, &s) -} - -func (m *HCPManager) isRunning() bool { - m.runLock.RLock() - defer m.runLock.RUnlock() - return m.running -} - -// setRunning sets the running status of the manager to the given value. If the -// given value is the same as the current running status, it returns false. If -// current status is updated to the given status, it returns true. -func (m *HCPManager) setRunning(r bool) bool { - m.runLock.Lock() - defer m.runLock.Unlock() - - if m.running == r { - return false - } - - // Initialize or close the stop channel depending what running status - // we're transitioning to. Channel must be initialized on start since - // a provider can be stopped and started multiple times. - if r { - m.stopCh = make(chan struct{}) - } else { - close(m.stopCh) - } - - m.running = r - return true -} - -// Stop stops the manager's main loop that sends updates -// and stops the SCADA provider and telemetry provider. 
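[Editor's note] Taken together, the downgraded Manager above is driven as a single long-lived Run loop plus UpdateConfig/SendUpdate calls, rather than the Start/Stop lifecycle being removed here. A minimal usage sketch based on the call patterns in the tests later in this patch; how the server agent actually wires this up is outside the hunks shown here.

package hcpusagesketch

import (
	"context"

	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/hcp"
	hcpclient "github.com/hashicorp/consul/agent/hcp/client"
)

func runHCPManager(ctx context.Context, client hcpclient.Client, statusFn hcp.StatusCallback) {
	mgr := hcp.NewManager(hcp.ManagerConfig{
		Client:   client,
		StatusFn: statusFn,
		Logger:   hclog.Default().Named("hcp_manager"),
	})

	// Run blocks for the life of the server agent, so it gets its own
	// goroutine; it pushes an initial status update and then heartbeats on
	// the configured interval.
	go mgr.Run(ctx)

	// If cloud credentials only arrive later, a new ManagerConfig can be
	// swapped in with mgr.UpdateConfig, which also triggers an immediate
	// status update.

	// A status push can also be requested explicitly at any time.
	mgr.SendUpdate()
}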
-func (m *HCPManager) Stop() error { - changed := m.setRunning(false) - if !changed { - m.logger.Trace("HCP manager already stopped") - return nil - } - m.logger.Info("HCP manager stopping") - - m.cfgMu.RLock() - defer m.cfgMu.RUnlock() - - if m.cfg.SCADAProvider != nil { - m.cfg.SCADAProvider.Stop() - } - - if m.cfg.TelemetryProvider != nil && !reflect.ValueOf(m.cfg.TelemetryProvider).IsNil() { - m.cfg.TelemetryProvider.Stop() - } - - if m.cfg.ManagementTokenDeleterFn != nil && m.cfg.CloudConfig.ManagementToken != "" { - err := m.cfg.ManagementTokenDeleterFn(m.cfg.CloudConfig.ManagementToken) - if err != nil { - return err - } - } - - m.logger.Info("HCP manager stopped") - return nil + return m.cfg.Client.PushServerStatus(ctx, &s) } diff --git a/agent/hcp/manager_lifecycle.go b/agent/hcp/manager_lifecycle.go deleted file mode 100644 index 6b7b6a46dc712..0000000000000 --- a/agent/hcp/manager_lifecycle.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "os" - "path/filepath" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// HCPManagerLifecycleFn returns a LinkEventHandler function which will appropriately -// Start and Stop the HCP Manager based on the Link event received. If a link is upserted, -// the HCP Manager is started, and if a link is deleted, the HCP manager is stopped. -func HCPManagerLifecycleFn( - m Manager, - hcpClientFn func(cfg config.CloudConfig) (hcpclient.Client, error), - loadMgmtTokenFn func( - ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string, - ) (string, error), - cloudConfig config.CloudConfig, - dataDir string, -) LinkEventHandler { - return func(ctx context.Context, logger hclog.Logger, watchEvent *pbresource.WatchEvent) { - // This indicates that a Link was deleted - if watchEvent.GetDelete() != nil { - logger.Debug("HCP Link deleted, stopping HCP manager") - - if dataDir != "" { - hcpConfigDir := filepath.Join(dataDir, constants.SubDir) - logger.Debug("deleting hcp-config dir", "dir", hcpConfigDir) - err := os.RemoveAll(hcpConfigDir) - if err != nil { - logger.Error("failed to delete hcp-config dir", "dir", hcpConfigDir, "err", err) - } - } - - err := m.Stop() - if err != nil { - logger.Error("error stopping HCP manager", "error", err) - } - return - } - - // This indicates that a Link was either created or updated - if watchEvent.GetUpsert() != nil { - logger.Debug("HCP Link upserted, starting manager if not already started") - - res := watchEvent.GetUpsert().GetResource() - var link pbhcp.Link - if err := res.GetData().UnmarshalTo(&link); err != nil { - logger.Error("error unmarshalling link data", "error", err) - return - } - - if validated, reason := hcpctl.IsValidated(res); !validated { - logger.Debug("HCP Link not validated, not starting manager", "reason", reason) - return - } - - // Update the HCP manager configuration with the link values - // Merge the link data with the existing cloud config so that we only overwrite the - // fields that are provided by the link. This ensures that: - // 1. The HCP configuration (i.e., how to connect to HCP) is preserved - // 2. 
The Consul agent's node ID and node name are preserved - newCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - } - mergedCfg := config.Merge(cloudConfig, newCfg) - hcpClient, err := hcpClientFn(mergedCfg) - if err != nil { - logger.Error("error creating HCP client", "error", err) - return - } - - // Load the management token if access is set to read-write. Read-only clusters - // will not have a management token provided by HCP. - var token string - if link.GetAccessLevel() == pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE { - token, err = loadMgmtTokenFn(ctx, logger, hcpClient, dataDir) - if err != nil { - logger.Error("error loading management token", "error", err) - return - } - } - - mergedCfg.ManagementToken = token - m.UpdateConfig(hcpClient, mergedCfg) - - err = m.Start(ctx) - if err != nil { - logger.Error("error starting HCP manager", "error", err) - } - } - } -} diff --git a/agent/hcp/manager_lifecycle_test.go b/agent/hcp/manager_lifecycle_test.go deleted file mode 100644 index b40a772ab4c8e..0000000000000 --- a/agent/hcp/manager_lifecycle_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "errors" - "io" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -func TestHCPManagerLifecycleFn(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard}) - - mockHCPClient := hcpclient.NewMockClient(t) - mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) { - return mockHCPClient, nil - } - - mockLoadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "test-mgmt-token", nil - } - - dataDir := testutil.TempDir(t, "test-link-controller") - err := os.Mkdir(filepath.Join(dataDir, constants.SubDir), os.ModeDir) - require.NoError(t, err) - existingCfg := config.CloudConfig{ - AuthURL: "test.com", - } - - type testCase struct { - mutateLink func(*pbhcp.Link) - mutateUpsertEvent func(*pbresource.WatchEvent_Upsert) - applyMocksAndAssertions func(*testing.T, *MockManager, *pbhcp.Link) - hcpClientFn func(config.CloudConfig) (hcpclient.Client, error) - loadMgmtTokenFn func(context.Context, hclog.Logger, hcpclient.Client, string) (string, error) - } - - testCases := map[string]testCase{ - // HCP manager should be started when link is created and stopped when link is deleted - "Ok": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - - expectedCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - AuthURL: "test.com", - ManagementToken: "test-mgmt-token", - } - mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once() - - 
mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - // HCP manager should not be updated with management token - "ReadOnly": { - mutateLink: func(link *pbhcp.Link) { - link.AccessLevel = pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - - expectedCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - AuthURL: "test.com", - ManagementToken: "", - } - mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once() - - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - // HCP manager should not be started or updated if link is not validated - "ValidationError": { - mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) { - upsert.Resource.Status = map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedFailed}, - }, - } - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - "Error_InvalidLink": { - mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) { - upsert.Resource = nil - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - "Error_HCPManagerStop": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once() - mgr.EXPECT().Stop().Return(errors.New("could not stop HCP manager")).Once() - }, - }, - "Error_CreatingHCPClient": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - hcpClientFn: func(_ config.CloudConfig) (hcpclient.Client, error) { - return nil, errors.New("could not create HCP client") - }, - }, - // This should result in the HCP manager not being started - "Error_LoadMgmtToken": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - loadMgmtTokenFn: func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "", errors.New("could not load management token") - }, - }, - "Error_HCPManagerStart": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(errors.New("could not start HCP manager")).Once() - mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once() - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - } - - for name, test := range testCases { - t.Run(name, func(t2 *testing.T) { - mgr := NewMockManager(t2) - - // Set up a link - link := pbhcp.Link{ - ResourceId: "abc", - ClientId: "def", - ClientSecret: "ghi", - AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, - } - - if test.mutateLink 
!= nil { - test.mutateLink(&link) - } - - linkResource, err := anypb.New(&link) - require.NoError(t2, err) - - if test.applyMocksAndAssertions != nil { - test.applyMocksAndAssertions(t2, mgr, &link) - } - - testHcpClientFn := mockHcpClientFn - if test.hcpClientFn != nil { - testHcpClientFn = test.hcpClientFn - } - - testLoadMgmtToken := mockLoadMgmtTokenFn - if test.loadMgmtTokenFn != nil { - testLoadMgmtToken = test.loadMgmtTokenFn - } - - updateManagerLifecycle := HCPManagerLifecycleFn( - mgr, testHcpClientFn, - testLoadMgmtToken, existingCfg, dataDir, - ) - - upsertEvent := &pbresource.WatchEvent_Upsert{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: "global", - Type: pbhcp.LinkType, - }, - Status: map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess}, - }, - }, - Data: linkResource, - }, - } - if test.mutateUpsertEvent != nil { - test.mutateUpsertEvent(upsertEvent) - } - - // Handle upsert event - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Upsert_{ - Upsert: upsertEvent, - }, - }) - - // Handle delete event. This should stop HCP manager - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Delete_{ - Delete: &pbresource.WatchEvent_Delete{}, - }, - }) - - // Ensure hcp-config directory is removed - file := filepath.Join(dataDir, constants.SubDir) - if _, err := os.Stat(file); err == nil || !os.IsNotExist(err) { - require.Fail(t2, "should have removed hcp-config directory") - } - }) - } -} diff --git a/agent/hcp/manager_test.go b/agent/hcp/manager_test.go index 83773791727d5..48ace166618bf 100644 --- a/agent/hcp/manager_test.go +++ b/agent/hcp/manager_test.go @@ -1,269 +1,48 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package hcp import ( - "fmt" "io" "testing" "time" + hcpclient "github.com/hashicorp/consul/agent/hcp/client" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "golang.org/x/net/context" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/go-hclog" - - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/agent/hcp/scada" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" ) -func TestManager_MonitorHCPLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard}) - - mgr := NewManager( - ManagerConfig{ - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - }, - ) - mockHCPClient := hcpclient.NewMockClient(t) - mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) { - return mockHCPClient, nil - } - loadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "test-mgmt-token", nil - } - - require.False(t, mgr.isRunning()) - updateManagerLifecycle := HCPManagerLifecycleFn( - mgr, mockHcpClientFn, - loadMgmtTokenFn, config.CloudConfig{}, "", - ) - - // Set up a link - link := pbhcp.Link{ - ResourceId: "abc", - ClientId: "def", - ClientSecret: "ghi", - AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, - } - linkResource, err := anypb.New(&link) - require.NoError(t, err) - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Upsert_{ - Upsert: &pbresource.WatchEvent_Upsert{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: "global", - Type: pbhcp.LinkType, - }, - Status: map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess}, - }, - }, - Data: linkResource, - }, - }, - }, - }) - - // Validate that the HCP manager is started - require.True(t, mgr.isRunning()) -} - -func TestManager_Start(t *testing.T) { +func TestManager_Run(t *testing.T) { client := hcpclient.NewMockClient(t) statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { return hcpclient.ServerStatus{ID: t.Name()}, nil } - upsertManagementTokenCalled := make(chan struct{}, 1) - upsertManagementTokenF := func(name, secretID string) error { - upsertManagementTokenCalled <- struct{}{} - return nil - } updateCh := make(chan struct{}, 1) client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Once() - - cloudCfg := config.CloudConfig{ - ResourceID: "resource-id", - NodeID: "node-1", - ManagementToken: "fake-token", - } - scadaM := scada.NewMockProvider(t) - scadaM.EXPECT().UpdateHCPConfig(cloudCfg).Return(nil).Once() - scadaM.EXPECT().UpdateMeta( - map[string]string{ - "consul_server_id": string(cloudCfg.NodeID), - }, - ).Return().Once() - scadaM.EXPECT().Start().Return(nil) - - telemetryM := NewMockTelemetryProvider(t) - telemetryM.EXPECT().Start( - mock.Anything, &HCPProviderCfg{ - HCPClient: client, - HCPConfig: &cloudCfg, - }, - ).Return(nil).Once() - - mgr := NewManager( - ManagerConfig{ - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - 
StatusFn: statusF, - ManagementTokenUpserterFn: upsertManagementTokenF, - SCADAProvider: scadaM, - TelemetryProvider: telemetryM, - }, - ) + mgr := NewManager(ManagerConfig{ + Client: client, + Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + StatusFn: statusF, + }) mgr.testUpdateSent = updateCh ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - mgr.UpdateConfig(client, cloudCfg) - mgr.Start(ctx) + go mgr.Run(ctx) select { case <-updateCh: case <-time.After(time.Second): require.Fail(t, "manager did not send update in expected time") } - select { - case <-upsertManagementTokenCalled: - case <-time.After(time.Second): - require.Fail(t, "manager did not upsert management token in expected time") - } - // Make sure after manager has stopped no more statuses are pushed. cancel() client.AssertExpectations(t) } -func TestManager_StartMultipleTimes(t *testing.T) { - client := hcpclient.NewMockClient(t) - statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { - return hcpclient.ServerStatus{ID: t.Name()}, nil - } - - updateCh := make(chan struct{}, 1) - client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Once() - - cloudCfg := config.CloudConfig{ - ResourceID: "organization/85702e73-8a3d-47dc-291c-379b783c5804/project/8c0547c0-10e8-1ea2-dffe-384bee8da634/hashicorp.consul.global-network-manager.cluster/test", - NodeID: "node-1", - ManagementToken: "fake-token", - } - - mgr := NewManager( - ManagerConfig{ - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - StatusFn: statusF, - }, - ) - - mgr.testUpdateSent = updateCh - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Start the manager twice concurrently, expect only one update - mgr.UpdateConfig(client, cloudCfg) - go mgr.Start(ctx) - go mgr.Start(ctx) - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - - select { - case <-updateCh: - require.Fail(t, "manager sent an update when not expected") - case <-time.After(time.Second): - } - - // Try start the manager again, still don't expect an update since already running - mgr.Start(ctx) - select { - case <-updateCh: - require.Fail(t, "manager sent an update when not expected") - case <-time.After(time.Second): - } -} - -func TestManager_UpdateConfig(t *testing.T) { - client := hcpclient.NewMockClient(t) - statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { - return hcpclient.ServerStatus{ID: t.Name()}, nil - } - - updateCh := make(chan struct{}, 1) - - cloudCfg := config.CloudConfig{ - ResourceID: "organization/85702e73-8a3d-47dc-291c-379b783c5804/project/8c0547c0-10e8-1ea2-dffe-384bee8da634/hashicorp.consul.global-network-manager.cluster/test", - NodeID: "node-1", - } - - mgr := NewManager( - ManagerConfig{ - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - StatusFn: statusF, - CloudConfig: cloudCfg, - Client: client, - }, - ) - - mgr.testUpdateSent = updateCh - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Start the manager, expect an initial status update - client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Once() - mgr.Start(ctx) - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - - // Update the cloud configuration, expect a status update - 
client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Once() - updatedCfg := cloudCfg - updatedCfg.ManagementToken = "token" - mgr.UpdateConfig(client, updatedCfg) - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - - // Update the client, expect a status update - updatedClient := hcpclient.NewMockClient(t) - updatedClient.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Once() - mgr.UpdateConfig(updatedClient, updatedCfg) - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - - // Update with the same values, don't expect a status update - mgr.UpdateConfig(updatedClient, updatedCfg) - select { - case <-updateCh: - require.Fail(t, "manager sent an update when not expected") - case <-time.After(time.Second): - } -} - func TestManager_SendUpdate(t *testing.T) { client := hcpclient.NewMockClient(t) statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { @@ -273,19 +52,17 @@ func TestManager_SendUpdate(t *testing.T) { // Expect two calls, once during run startup and again when SendUpdate is called client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Twice() - mgr := NewManager( - ManagerConfig{ - Client: client, - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - StatusFn: statusF, - }, - ) + mgr := NewManager(ManagerConfig{ + Client: client, + Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + StatusFn: statusF, + }) mgr.testUpdateSent = updateCh ctx, cancel := context.WithCancel(context.Background()) defer cancel() - mgr.Start(ctx) + go mgr.Run(ctx) select { case <-updateCh: case <-time.After(time.Second): @@ -309,21 +86,19 @@ func TestManager_SendUpdate_Periodic(t *testing.T) { // Expect two calls, once during run startup and again when SendUpdate is called client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Twice() - mgr := NewManager( - ManagerConfig{ - Client: client, - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - StatusFn: statusF, - MaxInterval: time.Second, - MinInterval: 100 * time.Millisecond, - }, - ) + mgr := NewManager(ManagerConfig{ + Client: client, + Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), + StatusFn: statusF, + MaxInterval: time.Second, + MinInterval: 100 * time.Millisecond, + }) mgr.testUpdateSent = updateCh ctx, cancel := context.WithCancel(context.Background()) defer cancel() - mgr.Start(ctx) + go mgr.Run(ctx) select { case <-updateCh: case <-time.After(time.Second): @@ -336,105 +111,3 @@ func TestManager_SendUpdate_Periodic(t *testing.T) { } client.AssertExpectations(t) } - -func TestManager_Stop(t *testing.T) { - client := hcpclient.NewMockClient(t) - - // Configure status functions called in sendUpdate - statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { - return hcpclient.ServerStatus{ID: t.Name()}, nil - } - updateCh := make(chan struct{}, 1) - client.EXPECT().PushServerStatus(mock.Anything, &hcpclient.ServerStatus{ID: t.Name()}).Return(nil).Twice() - - // Configure management token creation and cleanup - token := "test-token" - upsertManagementTokenCalled := make(chan struct{}, 1) - upsertManagementTokenF := func(name, secretID string) error { - upsertManagementTokenCalled <- struct{}{} - if secretID != token { - return 
fmt.Errorf("expected token %q, got %q", token, secretID) - } - return nil - } - deleteManagementTokenCalled := make(chan struct{}, 1) - deleteManagementTokenF := func(secretID string) error { - deleteManagementTokenCalled <- struct{}{} - if secretID != token { - return fmt.Errorf("expected token %q, got %q", token, secretID) - } - return nil - } - - // Configure the SCADA provider - scadaM := scada.NewMockProvider(t) - scadaM.EXPECT().UpdateHCPConfig(mock.Anything).Return(nil).Once() - scadaM.EXPECT().UpdateMeta(mock.Anything).Return().Once() - scadaM.EXPECT().Start().Return(nil).Once() - scadaM.EXPECT().Stop().Return(nil).Once() - - // Configure the telemetry provider - telemetryM := NewMockTelemetryProvider(t) - telemetryM.EXPECT().Start(mock.Anything, mock.Anything).Return(nil).Once() - telemetryM.EXPECT().Stop().Return().Once() - - // Configure manager with all its dependencies - mgr := NewManager( - ManagerConfig{ - Logger: testutil.Logger(t), - StatusFn: statusF, - Client: client, - ManagementTokenUpserterFn: upsertManagementTokenF, - ManagementTokenDeleterFn: deleteManagementTokenF, - SCADAProvider: scadaM, - TelemetryProvider: telemetryM, - CloudConfig: config.CloudConfig{ - ManagementToken: token, - }, - }, - ) - mgr.testUpdateSent = updateCh - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Start the manager - err := mgr.Start(ctx) - require.NoError(t, err) - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - select { - case <-upsertManagementTokenCalled: - case <-time.After(time.Second): - require.Fail(t, "manager did not create token in expected time") - } - - // Send an update to ensure the manager is running in its main loop - mgr.SendUpdate() - select { - case <-updateCh: - case <-time.After(time.Second): - require.Fail(t, "manager did not send update in expected time") - } - - // Stop the manager - err = mgr.Stop() - require.NoError(t, err) - - // Validate that the management token delete function is called - select { - case <-deleteManagementTokenCalled: - case <-time.After(time.Millisecond * 100): - require.Fail(t, "manager did not create token in expected time") - } - - // Send an update, expect no update since manager is stopped - mgr.SendUpdate() - select { - case <-updateCh: - require.Fail(t, "manager sent update after stopped") - case <-time.After(time.Second): - } -} diff --git a/agent/hcp/mock_Manager.go b/agent/hcp/mock_Manager.go deleted file mode 100644 index 422d9034d88d2..0000000000000 --- a/agent/hcp/mock_Manager.go +++ /dev/null @@ -1,209 +0,0 @@ -// Code generated by mockery v2.38.0. DO NOT EDIT. 
- -package hcp - -import ( - client "github.com/hashicorp/consul/agent/hcp/client" - config "github.com/hashicorp/consul/agent/hcp/config" - - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// MockManager is an autogenerated mock type for the Manager type -type MockManager struct { - mock.Mock -} - -type MockManager_Expecter struct { - mock *mock.Mock -} - -func (_m *MockManager) EXPECT() *MockManager_Expecter { - return &MockManager_Expecter{mock: &_m.Mock} -} - -// GetCloudConfig provides a mock function with given fields: -func (_m *MockManager) GetCloudConfig() config.CloudConfig { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetCloudConfig") - } - - var r0 config.CloudConfig - if rf, ok := ret.Get(0).(func() config.CloudConfig); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(config.CloudConfig) - } - - return r0 -} - -// MockManager_GetCloudConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCloudConfig' -type MockManager_GetCloudConfig_Call struct { - *mock.Call -} - -// GetCloudConfig is a helper method to define mock.On call -func (_e *MockManager_Expecter) GetCloudConfig() *MockManager_GetCloudConfig_Call { - return &MockManager_GetCloudConfig_Call{Call: _e.mock.On("GetCloudConfig")} -} - -func (_c *MockManager_GetCloudConfig_Call) Run(run func()) *MockManager_GetCloudConfig_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *MockManager_GetCloudConfig_Call) Return(_a0 config.CloudConfig) *MockManager_GetCloudConfig_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockManager_GetCloudConfig_Call) RunAndReturn(run func() config.CloudConfig) *MockManager_GetCloudConfig_Call { - _c.Call.Return(run) - return _c -} - -// Start provides a mock function with given fields: _a0 -func (_m *MockManager) Start(_a0 context.Context) error { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for Start") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// MockManager_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type MockManager_Start_Call struct { - *mock.Call -} - -// Start is a helper method to define mock.On call -// - _a0 context.Context -func (_e *MockManager_Expecter) Start(_a0 interface{}) *MockManager_Start_Call { - return &MockManager_Start_Call{Call: _e.mock.On("Start", _a0)} -} - -func (_c *MockManager_Start_Call) Run(run func(_a0 context.Context)) *MockManager_Start_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *MockManager_Start_Call) Return(_a0 error) *MockManager_Start_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockManager_Start_Call) RunAndReturn(run func(context.Context) error) *MockManager_Start_Call { - _c.Call.Return(run) - return _c -} - -// Stop provides a mock function with given fields: -func (_m *MockManager) Stop() error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Stop") - } - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// MockManager_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' -type MockManager_Stop_Call struct { - *mock.Call -} - -// Stop is a helper method to 
define mock.On call -func (_e *MockManager_Expecter) Stop() *MockManager_Stop_Call { - return &MockManager_Stop_Call{Call: _e.mock.On("Stop")} -} - -func (_c *MockManager_Stop_Call) Run(run func()) *MockManager_Stop_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *MockManager_Stop_Call) Return(_a0 error) *MockManager_Stop_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockManager_Stop_Call) RunAndReturn(run func() error) *MockManager_Stop_Call { - _c.Call.Return(run) - return _c -} - -// UpdateConfig provides a mock function with given fields: _a0, _a1 -func (_m *MockManager) UpdateConfig(_a0 client.Client, _a1 config.CloudConfig) { - _m.Called(_a0, _a1) -} - -// MockManager_UpdateConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateConfig' -type MockManager_UpdateConfig_Call struct { - *mock.Call -} - -// UpdateConfig is a helper method to define mock.On call -// - _a0 client.Client -// - _a1 config.CloudConfig -func (_e *MockManager_Expecter) UpdateConfig(_a0 interface{}, _a1 interface{}) *MockManager_UpdateConfig_Call { - return &MockManager_UpdateConfig_Call{Call: _e.mock.On("UpdateConfig", _a0, _a1)} -} - -func (_c *MockManager_UpdateConfig_Call) Run(run func(_a0 client.Client, _a1 config.CloudConfig)) *MockManager_UpdateConfig_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(client.Client), args[1].(config.CloudConfig)) - }) - return _c -} - -func (_c *MockManager_UpdateConfig_Call) Return() *MockManager_UpdateConfig_Call { - _c.Call.Return() - return _c -} - -func (_c *MockManager_UpdateConfig_Call) RunAndReturn(run func(client.Client, config.CloudConfig)) *MockManager_UpdateConfig_Call { - _c.Call.Return(run) - return _c -} - -// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockManager(t interface { - mock.TestingT - Cleanup(func()) -}) *MockManager { - mock := &MockManager{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/hcp/mock_TelemetryProvider.go b/agent/hcp/mock_TelemetryProvider.go deleted file mode 100644 index f654575f5baea..0000000000000 --- a/agent/hcp/mock_TelemetryProvider.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by mockery v2.38.0. DO NOT EDIT. 
- -package hcp - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// MockTelemetryProvider is an autogenerated mock type for the TelemetryProvider type -type MockTelemetryProvider struct { - mock.Mock -} - -type MockTelemetryProvider_Expecter struct { - mock *mock.Mock -} - -func (_m *MockTelemetryProvider) EXPECT() *MockTelemetryProvider_Expecter { - return &MockTelemetryProvider_Expecter{mock: &_m.Mock} -} - -// Start provides a mock function with given fields: ctx, c -func (_m *MockTelemetryProvider) Start(ctx context.Context, c *HCPProviderCfg) error { - ret := _m.Called(ctx, c) - - if len(ret) == 0 { - panic("no return value specified for Start") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *HCPProviderCfg) error); ok { - r0 = rf(ctx, c) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// MockTelemetryProvider_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type MockTelemetryProvider_Start_Call struct { - *mock.Call -} - -// Start is a helper method to define mock.On call -// - ctx context.Context -// - c *HCPProviderCfg -func (_e *MockTelemetryProvider_Expecter) Start(ctx interface{}, c interface{}) *MockTelemetryProvider_Start_Call { - return &MockTelemetryProvider_Start_Call{Call: _e.mock.On("Start", ctx, c)} -} - -func (_c *MockTelemetryProvider_Start_Call) Run(run func(ctx context.Context, c *HCPProviderCfg)) *MockTelemetryProvider_Start_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*HCPProviderCfg)) - }) - return _c -} - -func (_c *MockTelemetryProvider_Start_Call) Return(_a0 error) *MockTelemetryProvider_Start_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockTelemetryProvider_Start_Call) RunAndReturn(run func(context.Context, *HCPProviderCfg) error) *MockTelemetryProvider_Start_Call { - _c.Call.Return(run) - return _c -} - -// Stop provides a mock function with given fields: -func (_m *MockTelemetryProvider) Stop() { - _m.Called() -} - -// MockTelemetryProvider_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' -type MockTelemetryProvider_Stop_Call struct { - *mock.Call -} - -// Stop is a helper method to define mock.On call -func (_e *MockTelemetryProvider_Expecter) Stop() *MockTelemetryProvider_Stop_Call { - return &MockTelemetryProvider_Stop_Call{Call: _e.mock.On("Stop")} -} - -func (_c *MockTelemetryProvider_Stop_Call) Run(run func()) *MockTelemetryProvider_Stop_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *MockTelemetryProvider_Stop_Call) Return() *MockTelemetryProvider_Stop_Call { - _c.Call.Return() - return _c -} - -func (_c *MockTelemetryProvider_Stop_Call) RunAndReturn(run func()) *MockTelemetryProvider_Stop_Call { - _c.Call.Return(run) - return _c -} - -// NewMockTelemetryProvider creates a new instance of MockTelemetryProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewMockTelemetryProvider(t interface { - mock.TestingT - Cleanup(func()) -}) *MockTelemetryProvider { - mock := &MockTelemetryProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agent/hcp/scada/capabilities.go b/agent/hcp/scada/capabilities.go index bbb6ea6266dc3..c18192ae6e94a 100644 --- a/agent/hcp/scada/capabilities.go +++ b/agent/hcp/scada/capabilities.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package scada diff --git a/agent/hcp/scada/mock_Provider.go b/agent/hcp/scada/mock_Provider.go index 7e922cb21bc48..b9a0fd2d4967e 100644 --- a/agent/hcp/scada/mock_Provider.go +++ b/agent/hcp/scada/mock_Provider.go @@ -1,13 +1,12 @@ -// Code generated by mockery v2.38.0. DO NOT EDIT. +// Code generated by mockery v2.20.0. DO NOT EDIT. package scada import ( - config "github.com/hashicorp/consul/agent/hcp/config" - mock "github.com/stretchr/testify/mock" - net "net" + mock "github.com/stretchr/testify/mock" + provider "github.com/hashicorp/hcp-scada-provider" time "time" @@ -122,10 +121,6 @@ func (_c *MockProvider_DeleteMeta_Call) RunAndReturn(run func(...string)) *MockP func (_m *MockProvider) GetMeta() map[string]string { ret := _m.Called() - if len(ret) == 0 { - panic("no return value specified for GetMeta") - } - var r0 map[string]string if rf, ok := ret.Get(0).(func() map[string]string); ok { r0 = rf() @@ -169,10 +164,6 @@ func (_c *MockProvider_GetMeta_Call) RunAndReturn(run func() map[string]string) func (_m *MockProvider) LastError() (time.Time, error) { ret := _m.Called() - if len(ret) == 0 { - panic("no return value specified for LastError") - } - var r0 time.Time var r1 error if rf, ok := ret.Get(0).(func() (time.Time, error)); ok { @@ -224,10 +215,6 @@ func (_c *MockProvider_LastError_Call) RunAndReturn(run func() (time.Time, error func (_m *MockProvider) Listen(capability string) (net.Listener, error) { ret := _m.Called(capability) - if len(ret) == 0 { - panic("no return value specified for Listen") - } - var r0 net.Listener var r1 error if rf, ok := ret.Get(0).(func(string) (net.Listener, error)); ok { @@ -282,10 +269,6 @@ func (_c *MockProvider_Listen_Call) RunAndReturn(run func(string) (net.Listener, func (_m *MockProvider) SessionStatus() string { ret := _m.Called() - if len(ret) == 0 { - panic("no return value specified for SessionStatus") - } - var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -327,10 +310,6 @@ func (_c *MockProvider_SessionStatus_Call) RunAndReturn(run func() string) *Mock func (_m *MockProvider) Start() error { ret := _m.Called() - if len(ret) == 0 { - panic("no return value specified for Start") - } - var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -372,10 +351,6 @@ func (_c *MockProvider_Start_Call) RunAndReturn(run func() error) *MockProvider_ func (_m *MockProvider) Stop() error { ret := _m.Called() - if len(ret) == 0 { - panic("no return value specified for Stop") - } - var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -413,98 +388,6 @@ func (_c *MockProvider_Stop_Call) RunAndReturn(run func() error) *MockProvider_S return _c } -// UpdateConfig provides a mock function with given fields: _a0 -func (_m *MockProvider) UpdateConfig(_a0 *provider.Config) error { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for UpdateConfig") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*provider.Config) error); ok 
{ - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// MockProvider_UpdateConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateConfig' -type MockProvider_UpdateConfig_Call struct { - *mock.Call -} - -// UpdateConfig is a helper method to define mock.On call -// - _a0 *provider.Config -func (_e *MockProvider_Expecter) UpdateConfig(_a0 interface{}) *MockProvider_UpdateConfig_Call { - return &MockProvider_UpdateConfig_Call{Call: _e.mock.On("UpdateConfig", _a0)} -} - -func (_c *MockProvider_UpdateConfig_Call) Run(run func(_a0 *provider.Config)) *MockProvider_UpdateConfig_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*provider.Config)) - }) - return _c -} - -func (_c *MockProvider_UpdateConfig_Call) Return(_a0 error) *MockProvider_UpdateConfig_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockProvider_UpdateConfig_Call) RunAndReturn(run func(*provider.Config) error) *MockProvider_UpdateConfig_Call { - _c.Call.Return(run) - return _c -} - -// UpdateHCPConfig provides a mock function with given fields: cfg -func (_m *MockProvider) UpdateHCPConfig(cfg config.CloudConfig) error { - ret := _m.Called(cfg) - - if len(ret) == 0 { - panic("no return value specified for UpdateHCPConfig") - } - - var r0 error - if rf, ok := ret.Get(0).(func(config.CloudConfig) error); ok { - r0 = rf(cfg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// MockProvider_UpdateHCPConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateHCPConfig' -type MockProvider_UpdateHCPConfig_Call struct { - *mock.Call -} - -// UpdateHCPConfig is a helper method to define mock.On call -// - cfg config.CloudConfig -func (_e *MockProvider_Expecter) UpdateHCPConfig(cfg interface{}) *MockProvider_UpdateHCPConfig_Call { - return &MockProvider_UpdateHCPConfig_Call{Call: _e.mock.On("UpdateHCPConfig", cfg)} -} - -func (_c *MockProvider_UpdateHCPConfig_Call) Run(run func(cfg config.CloudConfig)) *MockProvider_UpdateHCPConfig_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(config.CloudConfig)) - }) - return _c -} - -func (_c *MockProvider_UpdateHCPConfig_Call) Return(_a0 error) *MockProvider_UpdateHCPConfig_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *MockProvider_UpdateHCPConfig_Call) RunAndReturn(run func(config.CloudConfig) error) *MockProvider_UpdateHCPConfig_Call { - _c.Call.Return(run) - return _c -} - // UpdateMeta provides a mock function with given fields: _a0 func (_m *MockProvider) UpdateMeta(_a0 map[string]string) { _m.Called(_a0) @@ -538,12 +421,13 @@ func (_c *MockProvider_UpdateMeta_Call) RunAndReturn(run func(map[string]string) return _c } -// NewMockProvider creates a new instance of MockProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockProvider(t interface { +type mockConstructorTestingTNewMockProvider interface { mock.TestingT Cleanup(func()) -}) *MockProvider { +} + +// NewMockProvider creates a new instance of MockProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMockProvider(t mockConstructorTestingTNewMockProvider) *MockProvider { mock := &MockProvider{} mock.Mock.Test(t) diff --git a/agent/hcp/scada/scada.go b/agent/hcp/scada/scada.go index c62f45908bcf1..5aba819bda499 100644 --- a/agent/hcp/scada/scada.go +++ b/agent/hcp/scada/scada.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package scada @@ -11,8 +11,7 @@ import ( "github.com/hashicorp/go-hclog" libscada "github.com/hashicorp/hcp-scada-provider" "github.com/hashicorp/hcp-scada-provider/capability" - cloud "github.com/hashicorp/hcp-sdk-go/clients/cloud-shared/v1/models" - hcpcfg "github.com/hashicorp/hcp-sdk-go/config" + "github.com/hashicorp/hcp-sdk-go/resource" ) // Provider is the interface used in the rest of Consul core when using SCADA, it is aliased here to the same interface @@ -22,72 +21,34 @@ import ( //go:generate mockery --name Provider --with-expecter --inpackage type Provider interface { libscada.SCADAProvider - UpdateHCPConfig(cfg config.CloudConfig) error } const ( scadaConsulServiceKey = "consul" ) -type scadaProvider struct { - libscada.SCADAProvider - logger hclog.Logger -} - -// New returns an initialized SCADA provider with a zero configuration. -// It can listen but cannot start until UpdateHCPConfig is called with -// a configuration that provides credentials to contact HCP. -func New(logger hclog.Logger) (*scadaProvider, error) { - // Create placeholder resource link - resourceLink := cloud.HashicorpCloudLocationLink{ - Type: "no-op", - ID: "no-op", - Location: &cloud.HashicorpCloudLocationLocation{}, +func New(cfg config.CloudConfig, logger hclog.Logger) (Provider, error) { + resource, err := resource.FromString(cfg.ResourceID) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud resource_id: %w", err) } - // Configure with an empty HCP configuration - hcpConfig, err := hcpcfg.NewHCPConfig(hcpcfg.WithoutBrowserLogin()) + hcpConfig, err := cfg.HCPConfig() if err != nil { - return nil, fmt.Errorf("failed to configure SCADA provider: %w", err) + return nil, fmt.Errorf("failed to build HCPConfig: %w", err) } pvd, err := libscada.New(&libscada.Config{ Service: scadaConsulServiceKey, HCPConfig: hcpConfig, - Resource: resourceLink, + Resource: *resource.Link(), Logger: logger, }) if err != nil { return nil, err } - return &scadaProvider{pvd, logger}, nil -} - -// UpdateHCPConfig updates the SCADA provider with the given HCP -// configurations. -func (p *scadaProvider) UpdateHCPConfig(cfg config.CloudConfig) error { - resource, err := cfg.Resource() - if err != nil { - return err - } - - hcpCfg, err := cfg.HCPConfig() - if err != nil { - return err - } - - err = p.UpdateConfig(&libscada.Config{ - Service: scadaConsulServiceKey, - HCPConfig: hcpCfg, - Resource: *resource.Link(), - Logger: p.logger, - }) - if err != nil { - return err - } - - return nil + return pvd, nil } // IsCapability takes a net.Addr and returns true if it is a SCADA capability.Addr diff --git a/agent/hcp/scada/scada_test.go b/agent/hcp/scada/scada_test.go deleted file mode 100644 index 0cebed1b93a4c..0000000000000 --- a/agent/hcp/scada/scada_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package scada - -import ( - "testing" - - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" -) - -func TestUpdateHCPConfig(t *testing.T) { - for name, tc := range map[string]struct { - cfg config.CloudConfig - expectedErr string - }{ - "Success": { - cfg: config.CloudConfig{ - ResourceID: "organization/85702e73-8a3d-47dc-291c-379b783c5804/project/8c0547c0-10e8-1ea2-dffe-384bee8da634/hashicorp.consul.global-network-manager.cluster/test", - ClientID: "test", - ClientSecret: "test", - }, - }, - "Empty": { - cfg: config.CloudConfig{}, - expectedErr: "could not parse resource: unexpected number of tokens 1", - }, - "InvalidResource": { - cfg: config.CloudConfig{ - ResourceID: "invalid", - }, - expectedErr: "could not parse resource: unexpected number of tokens 1", - }, - } { - t.Run(name, func(t *testing.T) { - // Create a provider - p, err := New(hclog.NewNullLogger()) - require.NoError(t, err) - - // Update the provider - err = p.UpdateHCPConfig(tc.cfg) - if tc.expectedErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expectedErr) - return - } - require.NoError(t, err) - }) - } -} diff --git a/agent/hcp/telemetry/otel_exporter.go b/agent/hcp/telemetry/otel_exporter.go index 050d5660668d2..4618763336556 100644 --- a/agent/hcp/telemetry/otel_exporter.go +++ b/agent/hcp/telemetry/otel_exporter.go @@ -23,9 +23,7 @@ type MetricsClient interface { // EndpointProvider provides the endpoint where metrics are exported to by the OTELExporter. // EndpointProvider exposes the GetEndpoint() interface method to fetch the endpoint. // This abstraction layer offers flexibility, in particular for dynamic configuration or changes to the endpoint. -// The OTELExporter calls the Disabled interface to verify that it should actually export metrics. type EndpointProvider interface { - Disabled GetEndpoint() *url.URL } @@ -70,10 +68,6 @@ func (e *otelExporter) Aggregation(kind metric.InstrumentKind) aggregation.Aggre // Export serializes and transmits metric data to a receiver. 
func (e *otelExporter) Export(ctx context.Context, metrics *metricdata.ResourceMetrics) error { - if e.endpointProvider.IsDisabled() { - return nil - } - endpoint := e.endpointProvider.GetEndpoint() if endpoint == nil { return nil diff --git a/agent/hcp/telemetry/otel_exporter_test.go b/agent/hcp/telemetry/otel_exporter_test.go index ebe6486abca85..610fbb44e74bc 100644 --- a/agent/hcp/telemetry/otel_exporter_test.go +++ b/agent/hcp/telemetry/otel_exporter_test.go @@ -34,11 +34,9 @@ func (m *mockMetricsClient) ExportMetrics(ctx context.Context, protoMetrics *met type mockEndpointProvider struct { endpoint *url.URL - disabled bool } func (m *mockEndpointProvider) GetEndpoint() *url.URL { return m.endpoint } -func (m *mockEndpointProvider) IsDisabled() bool { return m.disabled } func TestTemporality(t *testing.T) { t.Parallel() @@ -82,20 +80,13 @@ func TestExport(t *testing.T) { client MetricsClient provider EndpointProvider }{ - "earlyReturnDisabledProvider": { - client: &mockMetricsClient{}, - provider: &mockEndpointProvider{ - disabled: true, - }, - }, "earlyReturnWithoutEndpoint": { client: &mockMetricsClient{}, provider: &mockEndpointProvider{}, }, "earlyReturnWithoutScopeMetrics": { - client: &mockMetricsClient{}, - metrics: mutateMetrics(nil), - provider: &mockEndpointProvider{}, + client: &mockMetricsClient{}, + metrics: mutateMetrics(nil), }, "earlyReturnWithoutMetrics": { client: &mockMetricsClient{}, @@ -103,7 +94,6 @@ func TestExport(t *testing.T) { {Metrics: []metricdata.Metrics{}}, }, ), - provider: &mockEndpointProvider{}, }, "errorWithExportFailure": { client: &mockMetricsClient{ @@ -120,9 +110,6 @@ func TestExport(t *testing.T) { }, }, ), - provider: &mockEndpointProvider{ - endpoint: &url.URL{}, - }, wantErr: "failed to export metrics", }, } { diff --git a/agent/hcp/telemetry/otel_sink.go b/agent/hcp/telemetry/otel_sink.go index ad310774047b2..7770f7eaaf16c 100644 --- a/agent/hcp/telemetry/otel_sink.go +++ b/agent/hcp/telemetry/otel_sink.go @@ -36,15 +36,8 @@ const ( defaultExportTimeout = 30 * time.Second ) -// Disabled should be implemented to turn on/off metrics processing -type Disabled interface { - // IsDisabled() can return true disallow the sink from accepting metrics. - IsDisabled() bool -} - // ConfigProvider is required to provide custom metrics processing. type ConfigProvider interface { - Disabled // GetLabels should return a set of OTEL attributes added by default all metrics. GetLabels() map[string]string @@ -136,10 +129,6 @@ func NewOTELSink(ctx context.Context, opts *OTELSinkOpts) (*OTELSink, error) { }, nil } -func (o *OTELSink) Shutdown() { - o.meterProvider.Shutdown(context.TODO()) -} - // SetGauge emits a Consul gauge metric. func (o *OTELSink) SetGauge(key []string, val float32) { o.SetGaugeWithLabels(key, val, nil) @@ -158,11 +147,8 @@ func (o *OTELSink) IncrCounter(key []string, val float32) { // AddSampleWithLabels emits a Consul gauge metric that gets // registed by an OpenTelemetry Histogram instrument. func (o *OTELSink) SetGaugeWithLabels(key []string, val float32, labels []gometrics.Label) { - if o.cfgProvider.IsDisabled() { - return - } - k := o.flattenKey(key) + if !o.allowedMetric(k) { return } @@ -189,11 +175,8 @@ func (o *OTELSink) SetGaugeWithLabels(key []string, val float32, labels []gometr // AddSampleWithLabels emits a Consul sample metric that gets registed by an OpenTelemetry Histogram instrument. 
func (o *OTELSink) AddSampleWithLabels(key []string, val float32, labels []gometrics.Label) { - if o.cfgProvider.IsDisabled() { - return - } - k := o.flattenKey(key) + if !o.allowedMetric(k) { return } @@ -218,11 +201,8 @@ func (o *OTELSink) AddSampleWithLabels(key []string, val float32, labels []gomet // IncrCounterWithLabels emits a Consul counter metric that gets registed by an OpenTelemetry Histogram instrument. func (o *OTELSink) IncrCounterWithLabels(key []string, val float32, labels []gometrics.Label) { - if o.cfgProvider.IsDisabled() { - return - } - k := o.flattenKey(key) + if !o.allowedMetric(k) { return } diff --git a/agent/hcp/telemetry/otel_sink_test.go b/agent/hcp/telemetry/otel_sink_test.go index 683f33a3a40d6..13c310b34ca03 100644 --- a/agent/hcp/telemetry/otel_sink_test.go +++ b/agent/hcp/telemetry/otel_sink_test.go @@ -21,9 +21,8 @@ import ( ) type mockConfigProvider struct { - filter *regexp.Regexp - labels map[string]string - disabled bool + filter *regexp.Regexp + labels map[string]string } func (m *mockConfigProvider) GetLabels() map[string]string { @@ -34,10 +33,6 @@ func (m *mockConfigProvider) GetFilters() *regexp.Regexp { return m.filter } -func (m *mockConfigProvider) IsDisabled() bool { - return m.disabled -} - var ( expectedResource = resource.NewSchemaless() @@ -228,29 +223,6 @@ func TestOTELSink(t *testing.T) { isSame(t, expectedSinkMetrics, collected) } -func TestOTELSinkDisabled(t *testing.T) { - reader := metric.NewManualReader() - ctx := context.Background() - - sink, err := NewOTELSink(ctx, &OTELSinkOpts{ - ConfigProvider: &mockConfigProvider{ - filter: regexp.MustCompile("raft"), - disabled: true, - }, - Reader: reader, - }) - require.NoError(t, err) - - sink.SetGauge([]string{"consul", "raft", "gauge"}, 1) - sink.IncrCounter([]string{"consul", "raft", "counter"}, 1) - sink.AddSample([]string{"consul", "raft", "sample"}, 1) - - var collected metricdata.ResourceMetrics - err = reader.Collect(ctx, &collected) - require.NoError(t, err) - require.Empty(t, collected.ScopeMetrics) -} - func TestLabelsToAttributes(t *testing.T) { for name, test := range map[string]struct { providerLabels map[string]string @@ -334,7 +306,7 @@ func TestLabelsToAttributes(t *testing.T) { sink, err := NewOTELSink(ctx, opts) require.NoError(t, err) - require.ElementsMatch(t, test.expectedOTELAttributes, sink.labelsToAttributes(test.goMetricsLabels)) + require.Equal(t, test.expectedOTELAttributes, sink.labelsToAttributes(test.goMetricsLabels)) }) } } diff --git a/agent/hcp/telemetry/otlp_transform.go b/agent/hcp/telemetry/otlp_transform.go index 907e7922ad98d..a244f0f1a5f61 100644 --- a/agent/hcp/telemetry/otlp_transform.go +++ b/agent/hcp/telemetry/otlp_transform.go @@ -16,8 +16,8 @@ import ( ) var ( - errAggregaton = errors.New("unsupported aggregation") - errTemporality = errors.New("unsupported temporality") + aggregationErr = errors.New("unsupported aggregation") + temporalityErr = errors.New("unsupported temporality") ) // isEmpty verifies if the given OTLP protobuf metrics contains metric data. 
@@ -99,7 +99,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { } case metricdata.Sum[float64]: if a.Temporality != metricdata.CumulativeTemporality { - return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errTemporality, a) + return out, fmt.Errorf("error: %w: %T", temporalityErr, a) } out.Data = &mpb.Metric_Sum{ Sum: &mpb.Sum{ @@ -110,7 +110,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { } case metricdata.Histogram[float64]: if a.Temporality != metricdata.CumulativeTemporality { - return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errTemporality, a) + return out, fmt.Errorf("error: %w: %T", temporalityErr, a) } out.Data = &mpb.Metric_Histogram{ Histogram: &mpb.Histogram{ @@ -119,7 +119,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { }, } default: - return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errAggregaton, a) + return out, fmt.Errorf("error: %w: %T", aggregationErr, a) } return out, nil } diff --git a/agent/hcp/telemetry/otlp_transform_test.go b/agent/hcp/telemetry/otlp_transform_test.go index d67df73d83433..04ff40382ddab 100644 --- a/agent/hcp/telemetry/otlp_transform_test.go +++ b/agent/hcp/telemetry/otlp_transform_test.go @@ -260,15 +260,15 @@ func TestTransformOTLP(t *testing.T) { // MetricType Error Test Cases _, err := metricTypeToPB(invalidHistTemporality) require.Error(t, err) - require.ErrorIs(t, err, errTemporality) + require.ErrorIs(t, err, temporalityErr) _, err = metricTypeToPB(invalidSumTemporality) require.Error(t, err) - require.ErrorIs(t, err, errTemporality) + require.ErrorIs(t, err, temporalityErr) _, err = metricTypeToPB(invalidSumAgg) require.Error(t, err) - require.ErrorIs(t, err, errAggregaton) + require.ErrorIs(t, err, aggregationErr) // Metrics Test Case m := metricsToPB(inputMetrics) diff --git a/agent/hcp/telemetry_provider.go b/agent/hcp/telemetry_provider.go index a575d63f8a82c..870d3b3685a44 100644 --- a/agent/hcp/telemetry_provider.go +++ b/agent/hcp/telemetry_provider.go @@ -5,24 +5,17 @@ package hcp import ( "context" - "errors" "fmt" - "net/http" "net/url" - "reflect" "regexp" "sync" "time" "github.com/armon/go-metrics" - "github.com/go-openapi/runtime" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/hcp/telemetry" - "github.com/hashicorp/consul/version" ) var ( @@ -30,217 +23,115 @@ var ( internalMetricRefreshFailure []string = []string{"hcp", "telemetry_config_provider", "refresh", "failure"} // internalMetricRefreshSuccess is a metric to monitor refresh successes. internalMetricRefreshSuccess []string = []string{"hcp", "telemetry_config_provider", "refresh", "success"} - // defaultTelemetryConfigRefreshInterval is a default fallback in case the first HCP fetch fails. - defaultTelemetryConfigRefreshInterval = 1 * time.Minute ) // Ensure hcpProviderImpl implements telemetry provider interfaces. -var _ TelemetryProvider = &hcpProviderImpl{} var _ telemetry.ConfigProvider = &hcpProviderImpl{} var _ telemetry.EndpointProvider = &hcpProviderImpl{} -var _ client.MetricsClientProvider = &hcpProviderImpl{} // hcpProviderImpl holds telemetry configuration and settings for continuous fetch of new config from HCP. // it updates configuration, if changes are detected. type hcpProviderImpl struct { // cfg holds configuration that can be dynamically updated. 
cfg *dynamicConfig - // httpCfg holds configuration for the HTTP client - httpCfg *httpCfg - // Reader-writer mutexes are used as the provider is read heavy. + // A reader-writer mutex is used as the provider is read heavy. // OTEL components access telemetryConfig during metrics collection and export (read). - // Meanwhile, configs are only updated when there are changes (write). - rw sync.RWMutex - httpCfgRW sync.RWMutex - - // running indicates if the HCP telemetry config provider has been started - running bool - - // stopCh is used to signal that the telemetry config provider should stop running. - stopCh chan struct{} - + // Meanwhile, config is only updated when there are changes (write). + rw sync.RWMutex // hcpClient is an authenticated client used to make HTTP requests to HCP. hcpClient client.Client - - // logger is the HCP logger for the provider - logger hclog.Logger - - // testUpdateConfigCh is used by unit tests to signal when an update config has occurred - testUpdateConfigCh chan struct{} } // dynamicConfig is a set of configurable settings for metrics collection, processing and export. // fields MUST be exported to compute hash for equals method. type dynamicConfig struct { - disabled bool - endpoint *url.URL - labels map[string]string - filters *regexp.Regexp + Endpoint *url.URL + Labels map[string]string + Filters *regexp.Regexp // refreshInterval controls the interval at which configuration is fetched from HCP to refresh config. - refreshInterval time.Duration -} - -// defaultDisabledCfg disables metric collection and contains default config values. -func defaultDisabledCfg() *dynamicConfig { - return &dynamicConfig{ - labels: map[string]string{}, - filters: client.DefaultMetricFilters, - refreshInterval: defaultTelemetryConfigRefreshInterval, - endpoint: nil, - disabled: true, - } -} - -// httpCfg is a set of configurable settings for the HTTP client used to export metrics -type httpCfg struct { - header *http.Header - client *retryablehttp.Client + RefreshInterval time.Duration } -//go:generate mockery --name TelemetryProvider --with-expecter --inpackage -type TelemetryProvider interface { - Start(ctx context.Context, c *HCPProviderCfg) error - Stop() -} - -type HCPProviderCfg struct { - HCPClient client.Client - HCPConfig config.CloudConfigurer -} - -// NewHCPProvider initializes and starts a HCP Telemetry provider. -func NewHCPProvider(ctx context.Context) *hcpProviderImpl { - h := &hcpProviderImpl{ - // Initialize with default config values. - cfg: defaultDisabledCfg(), - httpCfg: &httpCfg{}, - logger: hclog.FromContext(ctx), +// NewHCPProvider initializes and starts a HCP Telemetry provider with provided params. +func NewHCPProvider(ctx context.Context, hcpClient client.Client, telemetryCfg *client.TelemetryConfig) (*hcpProviderImpl, error) { + refreshInterval := telemetryCfg.RefreshConfig.RefreshInterval + // refreshInterval must be greater than 0, otherwise time.Ticker panics. + if refreshInterval <= 0 { + return nil, fmt.Errorf("invalid refresh interval: %d", refreshInterval) } - return h -} - -// Start starts a process that continuously checks for updates to the telemetry configuration -// by making a request to HCP. It only starts running if it's not already running. -func (h *hcpProviderImpl) Start(ctx context.Context, c *HCPProviderCfg) error { - changed := h.setRunning(true) - if !changed { - // Provider is already running. 
- return nil + cfg := &dynamicConfig{ + Endpoint: telemetryCfg.MetricsConfig.Endpoint, + Labels: telemetryCfg.MetricsConfig.Labels, + Filters: telemetryCfg.MetricsConfig.Filters, + RefreshInterval: refreshInterval, } - // Update the provider with the HCP configurations - h.hcpClient = c.HCPClient - err := h.updateHTTPConfig(c.HCPConfig) - if err != nil { - return fmt.Errorf("failed to initialize HCP telemetry provider: %v", err) + t := &hcpProviderImpl{ + cfg: cfg, + hcpClient: hcpClient, } - go h.run(ctx) + go t.run(ctx, refreshInterval) - return nil + return t, nil } -// run continuously checks for updates to the telemetry configuration by making a request to HCP. -func (h *hcpProviderImpl) run(ctx context.Context) error { - h.logger.Debug("starting telemetry config provider") - - // Try to initialize config once before starting periodic fetch. - h.updateConfig(ctx) - - ticker := time.NewTicker(h.getRefreshInterval()) +// run continously checks for updates to the telemetry configuration by making a request to HCP. +func (h *hcpProviderImpl) run(ctx context.Context, refreshInterval time.Duration) { + ticker := time.NewTicker(refreshInterval) defer ticker.Stop() for { select { case <-ticker.C: - if newRefreshInterval := h.updateConfig(ctx); newRefreshInterval > 0 { - ticker.Reset(newRefreshInterval) + if newCfg := h.getUpdate(ctx); newCfg != nil { + ticker.Reset(newCfg.RefreshInterval) } case <-ctx.Done(): - return nil - case <-h.stopCh: - return nil + return } } } -// updateConfig makes a HTTP request to HCP to update metrics configuration held in the provider. -func (h *hcpProviderImpl) updateConfig(ctx context.Context) time.Duration { - logger := h.logger.Named("telemetry_config_provider") - - if h.testUpdateConfigCh != nil { - defer func() { - select { - case h.testUpdateConfigCh <- struct{}{}: - default: - } - }() - } - - if h.hcpClient == nil || reflect.ValueOf(h.hcpClient).IsNil() { - // Disable metrics if HCP client is not configured - disabledMetricsCfg := defaultDisabledCfg() - h.modifyDynamicCfg(disabledMetricsCfg) - return disabledMetricsCfg.refreshInterval - } +// getUpdate makes a HTTP request to HCP to return a new metrics configuration +// and updates the hcpProviderImpl. +func (h *hcpProviderImpl) getUpdate(ctx context.Context) *dynamicConfig { + logger := hclog.FromContext(ctx).Named("telemetry_config_provider") ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - logger.Trace("fetching telemetry config") telemetryCfg, err := h.hcpClient.FetchTelemetryConfig(ctx) if err != nil { - // Only disable metrics on 404 or 401 to handle the case of an unlinked cluster. - // For other errors such as 5XX ones, we continue metrics collection, as these are potentially transient server-side errors. - apiErr, ok := err.(*runtime.APIError) - if ok && apiErr.IsClientError() { - disabledMetricsCfg := defaultDisabledCfg() - h.modifyDynamicCfg(disabledMetricsCfg) - return disabledMetricsCfg.refreshInterval - } - logger.Error("failed to fetch telemetry config from HCP", "error", err) metrics.IncrCounter(internalMetricRefreshFailure, 1) - return 0 + return nil } - logger.Trace("successfully fetched telemetry config") // newRefreshInterval of 0 or less can cause ticker Reset() panic. 
newRefreshInterval := telemetryCfg.RefreshConfig.RefreshInterval if newRefreshInterval <= 0 { logger.Error("invalid refresh interval duration", "refreshInterval", newRefreshInterval) metrics.IncrCounter(internalMetricRefreshFailure, 1) - return 0 + return nil } - newCfg := &dynamicConfig{ - filters: telemetryCfg.MetricsConfig.Filters, - endpoint: telemetryCfg.MetricsConfig.Endpoint, - labels: telemetryCfg.MetricsConfig.Labels, - refreshInterval: telemetryCfg.RefreshConfig.RefreshInterval, - disabled: telemetryCfg.MetricsConfig.Disabled, + newDynamicConfig := &dynamicConfig{ + Filters: telemetryCfg.MetricsConfig.Filters, + Endpoint: telemetryCfg.MetricsConfig.Endpoint, + Labels: telemetryCfg.MetricsConfig.Labels, + RefreshInterval: newRefreshInterval, } - h.modifyDynamicCfg(newCfg) - - return newCfg.refreshInterval -} - -// modifyDynamicCfg acquires a write lock to update new configuration and emits a success metric. -func (h *hcpProviderImpl) modifyDynamicCfg(newCfg *dynamicConfig) { + // Acquire write lock to update new configuration. h.rw.Lock() - h.cfg = newCfg + h.cfg = newDynamicConfig h.rw.Unlock() metrics.IncrCounter(internalMetricRefreshSuccess, 1) -} -func (h *hcpProviderImpl) getRefreshInterval() time.Duration { - h.rw.RLock() - defer h.rw.RUnlock() - - return h.cfg.refreshInterval + return newDynamicConfig } // GetEndpoint acquires a read lock to return endpoint configuration for consumers. @@ -248,7 +139,7 @@ func (h *hcpProviderImpl) GetEndpoint() *url.URL { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.endpoint + return h.cfg.Endpoint } // GetFilters acquires a read lock to return filters configuration for consumers. @@ -256,7 +147,7 @@ func (h *hcpProviderImpl) GetFilters() *regexp.Regexp { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.filters + return h.cfg.Filters } // GetLabels acquires a read lock to return labels configuration for consumers. @@ -264,106 +155,5 @@ func (h *hcpProviderImpl) GetLabels() map[string]string { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.labels -} - -// IsDisabled acquires a read lock and return true if metrics are enabled. -func (h *hcpProviderImpl) IsDisabled() bool { - h.rw.RLock() - defer h.rw.RUnlock() - - return h.cfg.disabled -} - -// updateHTTPConfig updates the HTTP configuration values that rely on the HCP configuration. -func (h *hcpProviderImpl) updateHTTPConfig(cfg config.CloudConfigurer) error { - h.httpCfgRW.Lock() - defer h.httpCfgRW.Unlock() - - if cfg == nil { - return errors.New("must provide valid HCP configuration") - } - - // Update headers - r, err := cfg.Resource() - if err != nil { - return fmt.Errorf("failed set telemetry client headers: %v", err) - } - header := make(http.Header) - header.Set("content-type", "application/x-protobuf") - header.Set("x-hcp-resource-id", r.String()) - header.Set("x-channel", fmt.Sprintf("consul/%s", version.GetHumanVersion())) - h.httpCfg.header = &header - - // Update HTTP client - hcpCfg, err := cfg.HCPConfig() - if err != nil { - return fmt.Errorf("failed to configure telemetry HTTP client: %v", err) - } - h.httpCfg.client = client.NewHTTPClient(hcpCfg.APITLSConfig(), hcpCfg) - - return nil -} - -// GetHeader acquires a read lock to return the HTTP request headers needed -// to export metrics. 
-func (h *hcpProviderImpl) GetHeader() http.Header { - h.httpCfgRW.RLock() - defer h.httpCfgRW.RUnlock() - - if h.httpCfg.header == nil { - return nil - } - - return h.httpCfg.header.Clone() -} - -// GetHTTPClient acquires a read lock to return the retryable HTTP client needed -// to export metrics. -func (h *hcpProviderImpl) GetHTTPClient() *retryablehttp.Client { - h.httpCfgRW.RLock() - defer h.httpCfgRW.RUnlock() - - return h.httpCfg.client -} - -// setRunning acquires a write lock to set whether the provider is running. -// If the given value is the same as the current running status, it returns -// false. If current status is updated to the given status, it returns true. -func (h *hcpProviderImpl) setRunning(r bool) bool { - h.rw.Lock() - defer h.rw.Unlock() - - if h.running == r { - return false - } - - // Initialize or close the stop channel depending what running status - // we're transitioning to. Channel must be initialized on start since - // a provider can be stopped and started multiple times. - if r { - h.stopCh = make(chan struct{}) - } else { - close(h.stopCh) - } - - h.running = r - - return true -} - -// Stop acquires a write lock to mark the provider as not running and sends a stop signal to the -// main run loop. It also updates the provider with a disabled configuration. -func (h *hcpProviderImpl) Stop() { - changed := h.setRunning(false) - if !changed { - h.logger.Trace("telemetry config provider already stopped") - return - } - - h.rw.Lock() - h.cfg = defaultDisabledCfg() - h.rw.Unlock() - - h.logger.Debug("telemetry config provider stopped") + return h.cfg.Labels } diff --git a/agent/hcp/telemetry_provider_test.go b/agent/hcp/telemetry_provider_test.go index 6801b9271ebc2..7386e4c32066e 100644 --- a/agent/hcp/telemetry_provider_test.go +++ b/agent/hcp/telemetry_provider_test.go @@ -6,7 +6,6 @@ package hcp import ( "context" "fmt" - "net/http" "net/url" "regexp" "strings" @@ -15,15 +14,11 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/go-openapi/runtime" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/version" - "github.com/hashicorp/go-hclog" ) const ( @@ -44,47 +39,64 @@ type testConfig struct { endpoint string labels map[string]string refreshInterval time.Duration - disabled bool } -func TestNewTelemetryConfigProvider_DefaultConfig(t *testing.T) { +func TestNewTelemetryConfigProvider(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + for name, tc := range map[string]struct { + testInputs *testConfig + wantErr string + }{ + "success": { + testInputs: &testConfig{ + refreshInterval: 1 * time.Second, + }, + }, + "failsWithInvalidRefreshInterval": { + testInputs: &testConfig{ + refreshInterval: 0 * time.Second, + }, + wantErr: "invalid refresh interval", + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCfg, err := testTelemetryCfg(tc.testInputs) + require.NoError(t, err) - // Initialize new provider - provider := NewHCPProvider(ctx) - provider.updateConfig(ctx) - - // Assert provider has default configuration and metrics processing is disabled. 
- defaultCfg := &dynamicConfig{ - labels: map[string]string{}, - filters: client.DefaultMetricFilters, - refreshInterval: defaultTelemetryConfigRefreshInterval, - endpoint: nil, - disabled: true, + cfgProvider, err := NewHCPProvider(ctx, client.NewMockClient(t), testCfg) + if tc.wantErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErr) + require.Nil(t, cfgProvider) + return + } + require.NotNil(t, cfgProvider) + }) } - require.Equal(t, defaultCfg, provider.cfg) } -func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { +func TestTelemetryConfigProviderGetUpdate(t *testing.T) { for name, tc := range map[string]struct { - mockExpect func(*client.MockClient) - metricKey string - initCfg *dynamicConfig - expected *dynamicConfig - expectedInterval time.Duration - skipHCPClient bool + mockExpect func(*client.MockClient) + metricKey string + optsInputs *testConfig + expected *testConfig }{ "noChanges": { - initCfg: testDynamicCfg(&testConfig{ + optsInputs: &testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }), + }, mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", @@ -96,26 +108,25 @@ func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: testDynamicCfg(&testConfig{ + expected: &testConfig{ endpoint: "http://test.com/v1/metrics", labels: map[string]string{ "test_label": "123", }, filters: "test", refreshInterval: testRefreshInterval, - }), - metricKey: testMetricKeySuccess, - expectedInterval: testRefreshInterval, + }, + metricKey: testMetricKeySuccess, }, "newConfig": { - initCfg: testDynamicCfg(&testConfig{ + optsInputs: &testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: 2 * time.Second, - }), + }, mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ endpoint: "http://newendpoint/v1/metrics", @@ -127,154 +138,83 @@ func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: testDynamicCfg(&testConfig{ + expected: &testConfig{ endpoint: "http://newendpoint/v1/metrics", filters: "consul", labels: map[string]string{ "new_label": "1234", }, refreshInterval: 2 * time.Second, - }), - expectedInterval: 2 * time.Second, - metricKey: testMetricKeySuccess, - }, - "newConfigMetricsDisabled": { - initCfg: testDynamicCfg(&testConfig{ - endpoint: "http://test.com/v1/metrics", - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - refreshInterval: 2 * time.Second, - }), - mockExpect: func(m *client.MockClient) { - mockCfg, _ := testTelemetryCfg(&testConfig{ - endpoint: "", - filters: "consul", - labels: map[string]string{ - "new_label": "1234", - }, - refreshInterval: 2 * time.Second, - disabled: true, - }) - m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: testDynamicCfg(&testConfig{ - endpoint: "", - filters: "consul", - labels: map[string]string{ - "new_label": "1234", - }, - refreshInterval: 2 * time.Second, - disabled: true, - }), - metricKey: testMetricKeySuccess, - expectedInterval: 2 * time.Second, + metricKey: testMetricKeySuccess, }, "sameConfigInvalidRefreshInterval": { - initCfg: testDynamicCfg(&testConfig{ + optsInputs: 
&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }), + }, mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ refreshInterval: 0 * time.Second, }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: testDynamicCfg(&testConfig{ + expected: &testConfig{ endpoint: "http://test.com/v1/metrics", labels: map[string]string{ "test_label": "123", }, filters: "test", refreshInterval: testRefreshInterval, - }), - metricKey: testMetricKeyFailure, - expectedInterval: 0, + }, + metricKey: testMetricKeyFailure, }, "sameConfigHCPClientFailure": { - initCfg: testDynamicCfg(&testConfig{ + optsInputs: &testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }), + }, mockExpect: func(m *client.MockClient) { m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, fmt.Errorf("failure")) }, - expected: testDynamicCfg(&testConfig{ - endpoint: "http://test.com/v1/metrics", - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - refreshInterval: testRefreshInterval, - }), - metricKey: testMetricKeyFailure, - expectedInterval: 0, - }, - "disableMetrics404": { - initCfg: testDynamicCfg(&testConfig{ + expected: &testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }), - mockExpect: func(m *client.MockClient) { - err := runtime.NewAPIError("404 failure", nil, 404) - m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, err) }, - expected: defaultDisabledCfg(), - metricKey: testMetricKeySuccess, - expectedInterval: defaultTelemetryConfigRefreshInterval, - }, - "hcpClientNotConfigured": { - skipHCPClient: true, - initCfg: testDynamicCfg(&testConfig{ - endpoint: "http://test.com/v1/metrics", - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - refreshInterval: testRefreshInterval, - }), - expected: defaultDisabledCfg(), - metricKey: testMetricKeySuccess, - expectedInterval: defaultTelemetryConfigRefreshInterval, + metricKey: testMetricKeyFailure, }, } { - tc := tc t.Run(name, func(t *testing.T) { sink := initGlobalSink() - var mockClient *client.MockClient - if !tc.skipHCPClient { - mockClient = client.NewMockClient(t) - tc.mockExpect(mockClient) - } + mockClient := client.NewMockClient(t) + tc.mockExpect(mockClient) + + dynamicCfg, err := testDynamicCfg(tc.optsInputs) + require.NoError(t, err) provider := &hcpProviderImpl{ hcpClient: mockClient, - cfg: tc.initCfg, - logger: hclog.NewNullLogger(), + cfg: dynamicCfg, } - newInterval := provider.updateConfig(context.Background()) - require.Equal(t, tc.expectedInterval, newInterval) + provider.getUpdate(context.Background()) // Verify endpoint provider returns correct config values. - require.Equal(t, tc.expected.endpoint, provider.GetEndpoint()) - require.Equal(t, tc.expected.filters, provider.GetFilters()) + require.Equal(t, tc.expected.endpoint, provider.GetEndpoint().String()) + require.Equal(t, tc.expected.filters, provider.GetFilters().String()) require.Equal(t, tc.expected.labels, provider.GetLabels()) - require.Equal(t, tc.expected.disabled, provider.IsDisabled()) // Verify count for transform success metric. 
interval := sink.Data()[0] @@ -286,212 +226,6 @@ func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { } } -func TestTelemetryConfigProvider_Start(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - provider := NewHCPProvider(ctx) - - testUpdateConfigCh := make(chan struct{}, 1) - provider.testUpdateConfigCh = testUpdateConfigCh - - // Configure mocks - mockClient := client.NewMockClient(t) - mTelemetryCfg, err := testTelemetryCfg(&testConfig{ - endpoint: "http://test.com/v1/metrics", - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - refreshInterval: testRefreshInterval, - }) - require.NoError(t, err) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mTelemetryCfg, nil) - mockHCPCfg := &config.MockCloudCfg{} - - // Run provider - go provider.Start(context.Background(), &HCPProviderCfg{ - HCPClient: mockClient, - HCPConfig: mockHCPCfg, - }) - - // Expect at least two update config calls to validate provider is running - // and has entered the main run loop - select { - case <-testUpdateConfigCh: - case <-time.After(time.Second): - require.Fail(t, "provider did not attempt to update config in expected time") - } - select { - case <-testUpdateConfigCh: - case <-time.After(time.Millisecond * 500): - require.Fail(t, "provider did not attempt to update config in expected time") - } - - mockClient.AssertExpectations(t) -} - -func TestTelemetryConfigProvider_MultipleRun(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - provider := NewHCPProvider(ctx) - - testUpdateConfigCh := make(chan struct{}, 1) - provider.testUpdateConfigCh = testUpdateConfigCh - - // Configure mocks - mockClient := client.NewMockClient(t) - mTelemetryCfg, err := testTelemetryCfg(&testConfig{ - refreshInterval: 30 * time.Minute, - }) - require.NoError(t, err) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mTelemetryCfg, nil) - mockHCPCfg := &config.MockCloudCfg{} - - // Run provider twice in parallel - go provider.Start(context.Background(), &HCPProviderCfg{ - HCPClient: mockClient, - HCPConfig: mockHCPCfg, - }) - go provider.Start(context.Background(), &HCPProviderCfg{ - HCPClient: mockClient, - HCPConfig: mockHCPCfg, - }) - - // Expect only one update config call - select { - case <-testUpdateConfigCh: - case <-time.After(time.Second): - require.Fail(t, "provider did not attempt to update config in expected time") - } - - select { - case <-testUpdateConfigCh: - require.Fail(t, "provider unexpectedly updated config") - case <-time.After(time.Second): - } - - // Try calling run again, should not update again - provider.Start(context.Background(), &HCPProviderCfg{ - HCPClient: mockClient, - HCPConfig: mockHCPCfg, - }) - - select { - case <-testUpdateConfigCh: - require.Fail(t, "provider unexpectedly updated config") - case <-time.After(time.Second): - } - - mockClient.AssertExpectations(t) -} - -func TestTelemetryConfigProvider_updateHTTPConfig(t *testing.T) { - for name, test := range map[string]struct { - wantErr string - cfg config.CloudConfigurer - }{ - "success": { - cfg: &config.MockCloudCfg{}, - }, - "failsWithoutCloudCfg": { - wantErr: "must provide valid HCP configuration", - cfg: nil, - }, - "failsHCPConfig": { - wantErr: "failed to configure telemetry HTTP client", - cfg: config.MockCloudCfg{ - ConfigErr: fmt.Errorf("test bad hcp config"), - }, - }, - "failsBadResource": { - wantErr: "failed set telemetry client headers", - cfg: 
config.MockCloudCfg{ - ResourceErr: fmt.Errorf("test bad resource"), - }, - }, - } { - t.Run(name, func(t *testing.T) { - provider := NewHCPProvider(context.Background()) - err := provider.updateHTTPConfig(test.cfg) - - if test.wantErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), test.wantErr) - return - } - - require.NoError(t, err) - require.NotNil(t, provider.GetHTTPClient()) - - expectedHeader := make(http.Header) - expectedHeader.Set("content-type", "application/x-protobuf") - expectedHeader.Set("x-hcp-resource-id", "organization/test-org/project/test-project/test-type/test-id") - expectedHeader.Set("x-channel", fmt.Sprintf("consul/%s", version.GetHumanVersion())) - require.Equal(t, expectedHeader, provider.GetHeader()) - }) - } -} - -func TestTelemetryConfigProvider_Stop(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - provider := NewHCPProvider(ctx) - - testUpdateConfigCh := make(chan struct{}, 1) - provider.testUpdateConfigCh = testUpdateConfigCh - - // Configure mocks - mockClient := client.NewMockClient(t) - mTelemetryCfg, err := testTelemetryCfg(&testConfig{ - endpoint: "http://test.com/v1/metrics", - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - refreshInterval: testRefreshInterval, - }) - require.NoError(t, err) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mTelemetryCfg, nil) - mockHCPCfg := &config.MockCloudCfg{} - - // Run provider - provider.Start(context.Background(), &HCPProviderCfg{ - HCPClient: mockClient, - HCPConfig: mockHCPCfg, - }) - - // Wait for at least two update config calls to ensure provider is running - // and has entered the main run loop - select { - case <-testUpdateConfigCh: - case <-time.After(time.Second): - require.Fail(t, "provider did not attempt to update config in expected time") - } - select { - case <-testUpdateConfigCh: - case <-time.After(time.Millisecond * 500): - require.Fail(t, "provider did not attempt to update config in expected time") - } - - // Stop the provider - provider.Stop() - require.Equal(t, defaultDisabledCfg(), provider.cfg) - select { - case <-testUpdateConfigCh: - require.Fail(t, "provider should not attempt to update config after stop") - case <-time.After(time.Second): - // Success, no updates have happened after stopping - } - - mockClient.AssertExpectations(t) -} - // mockRaceClient is a mock HCP client that fetches TelemetryConfig. // The mock TelemetryConfig returned can be manually updated at any time. // It manages concurrent read/write access to config with a sync.RWMutex. @@ -560,8 +294,7 @@ func TestTelemetryConfigProvider_Race(t *testing.T) { } // Start the provider goroutine, which fetches client TelemetryConfig every RefreshInterval. - provider := NewHCPProvider(ctx) - err = provider.Start(context.Background(), &HCPProviderCfg{m, config.MockCloudCfg{}}) + provider, err := NewHCPProvider(ctx, m, m.cfg) require.NoError(t, err) for count := 0; count < testRaceWriteSampleCount; count++ { @@ -570,7 +303,7 @@ func TestTelemetryConfigProvider_Race(t *testing.T) { require.NoError(t, err) // Force provider to obtain new client TelemetryConfig immediately. // This call is necessary to guarantee TelemetryConfig changes to assert on expected values below. - provider.updateConfig(context.Background()) + provider.getUpdate(context.Background()) // Start goroutines to access label configuration. 
wg := &sync.WaitGroup{} @@ -614,20 +347,22 @@ func initGlobalSink() *metrics.InmemSink { } // testDynamicCfg converts testConfig inputs to a dynamicConfig to be used in tests. -func testDynamicCfg(testCfg *testConfig) *dynamicConfig { - filters, _ := regexp.Compile(testCfg.filters) +func testDynamicCfg(testCfg *testConfig) (*dynamicConfig, error) { + filters, err := regexp.Compile(testCfg.filters) + if err != nil { + return nil, err + } - var endpoint *url.URL - if testCfg.endpoint != "" { - endpoint, _ = url.Parse(testCfg.endpoint) + endpoint, err := url.Parse(testCfg.endpoint) + if err != nil { + return nil, err } return &dynamicConfig{ - endpoint: endpoint, - filters: filters, - labels: testCfg.labels, - refreshInterval: testCfg.refreshInterval, - disabled: testCfg.disabled, - } + Endpoint: endpoint, + Filters: filters, + Labels: testCfg.labels, + RefreshInterval: testCfg.refreshInterval, + }, nil } // testTelemetryCfg converts testConfig inputs to a TelemetryConfig to be used in tests. @@ -637,21 +372,15 @@ func testTelemetryCfg(testCfg *testConfig) (*client.TelemetryConfig, error) { return nil, err } - var endpoint *url.URL - if testCfg.endpoint != "" { - u, err := url.Parse(testCfg.endpoint) - if err != nil { - return nil, err - } - endpoint = u + endpoint, err := url.Parse(testCfg.endpoint) + if err != nil { + return nil, err } - return &client.TelemetryConfig{ MetricsConfig: &client.MetricsConfig{ Endpoint: endpoint, Filters: filters, Labels: testCfg.labels, - Disabled: testCfg.disabled, }, RefreshConfig: &client.RefreshConfig{ RefreshInterval: testCfg.refreshInterval, diff --git a/agent/hcp/testing.go b/agent/hcp/testing.go index 1c0f364b0dd34..63bfd51106b11 100644 --- a/agent/hcp/testing.go +++ b/agent/hcp/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package hcp diff --git a/agent/hcp/testserver/main.go b/agent/hcp/testserver/main.go index e0db7670ef99f..ffdd4cac51afa 100644 --- a/agent/hcp/testserver/main.go +++ b/agent/hcp/testserver/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package main diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 1ce464d91cd89..ea3d315f6b8b5 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/internal/dnsutil" ) const ( @@ -244,7 +243,7 @@ func (s *HTTPHandlers) healthServiceNodes(resp http.ResponseWriter, req *http.Re } // Translate addresses after filtering so we don't waste effort. - s.agent.TranslateAddresses(args.Datacenter, out.Nodes, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(args.Datacenter, out.Nodes, TranslateAddressAcceptAny) // Use empty list instead of nil if out.Nodes == nil { diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index 86dd7be70f78f..4f1bd1c485457 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -28,25 +28,6 @@ import ( "github.com/hashicorp/consul/types" ) -func TestHealthEndpointsFailInV2(t *testing.T) { - t.Parallel() - - a := NewTestAgent(t, `experiments = ["resource-apis"]`) - - checkRequest := func(method, url string) { - t.Run(method+" "+url, func(t *testing.T) { - assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") - }) - } - - checkRequest("GET", "/v1/health/node/web") - checkRequest("GET", "/v1/health/checks/web") - checkRequest("GET", "/v1/health/state/web") - checkRequest("GET", "/v1/health/service/web") - checkRequest("GET", "/v1/health/connect/web") - checkRequest("GET", "/v1/health/ingress/web") -} - func TestHealthChecksInState(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -258,7 +239,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) { if err != nil { r.Fatalf("err: %v", err) } - assertIndex(r, resp) + assertIndex(t, resp) nodes = obj.(structs.HealthChecks) if len(nodes) != 2 { r.Fatalf("bad: %v", nodes) @@ -461,21 +442,19 @@ func TestHealthServiceChecks_NodeMetaFilter(t *testing.T) { t.Fatalf("err: %v", err) } - retry.Run(t, func(r *retry.R) { - req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil) - resp = httptest.NewRecorder() - obj, err = a.srv.HealthServiceChecks(resp, req) - if err != nil { - r.Fatalf("err: %v", err) - } - assertIndex(r, resp) + req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil) + resp = httptest.NewRecorder() + obj, err = a.srv.HealthServiceChecks(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + assertIndex(t, resp) - // Should be 1 health check for consul - nodes = obj.(structs.HealthChecks) - if len(nodes) != 1 { - r.Fatalf("bad: %v", obj) - } - }) + // Should be 1 health check for consul + nodes = obj.(structs.HealthChecks) + if len(nodes) != 1 { + t.Fatalf("bad: %v", obj) + } } func TestHealthServiceChecks_Filtering(t *testing.T) { @@ -613,7 +592,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) { if err != nil { r.Fatalf("err: %v", err) } - assertIndex(r, resp) + assertIndex(t, resp) nodes = obj.(structs.HealthChecks) if len(nodes) != 2 { r.Fatalf("bad: %v", obj) @@ -1371,7 +1350,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) { if err != nil { r.Fatalf("err: %v", err) } - assertIndex(r, resp) + assertIndex(t, resp) nodes = obj.(structs.CheckServiceNodes) if len(nodes) != 2 { r.Fatalf("bad: %v", obj) diff --git a/agent/http.go b/agent/http.go index d828ed04c10ec..0e5e2ffdc5d21 100644 --- a/agent/http.go +++ b/agent/http.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -36,7 +36,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/uiserver" "github.com/hashicorp/consul/api" - resourcehttp "github.com/hashicorp/consul/internal/resource/http" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/proto/private/pbcommon" @@ -262,19 +261,6 @@ func (s *HTTPHandlers) handler() http.Handler { handlePProf("/debug/pprof/symbol", pprof.Symbol) handlePProf("/debug/pprof/trace", pprof.Trace) - resourceAPIPrefix := "/api" - mux.Handle(resourceAPIPrefix+"/", - http.StripPrefix(resourceAPIPrefix, - resourcehttp.NewHandler( - resourceAPIPrefix, - s.agent.delegate.ResourceServiceClient(), - s.agent.baseDeps.Registry, - s.parseToken, - s.agent.logger.Named(logging.HTTP), - ), - ), - ) - if s.IsUIEnabled() { // Note that we _don't_ support reloading ui_config.{enabled, content_dir, // content_path} since this only runs at initial startup. @@ -397,11 +383,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc } logURL = aclEndpointRE.ReplaceAllString(logURL, "$1$4") - rejectCatalogV1Endpoint := false - if s.agent.baseDeps.UseV2Resources() { - rejectCatalogV1Endpoint = isV1CatalogRequest(req.URL.Path) - } - if s.denylist.Block(req.URL.Path) { errMsg := "Endpoint is blocked by agent configuration" httpLogger.Error("Request error", @@ -463,14 +444,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc return strings.Contains(err.Error(), rate.ErrRetryLater.Error()) } - isUsingV2CatalogExperiment := func(err error) bool { - if err == nil { - return false - } - - return structs.IsErrUsingV2CatalogExperiment(err) - } - isMethodNotAllowed := func(err error) bool { _, ok := err.(MethodNotAllowedError) return ok @@ -506,10 +479,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc msg = s.Message() } - if isUsingV2CatalogExperiment(err) && !isHTTPError(err) { - err = newRejectV1RequestWhenV2EnabledError() - } - switch { case isForbidden(err): resp.WriteHeader(http.StatusForbidden) @@ -586,12 +555,7 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc if err == nil { // Invoke the handler - if rejectCatalogV1Endpoint { - obj = nil - err = s.rejectV1RequestWhenV2Enabled() - } else { - obj, err = handler(resp, req) - } + obj, err = handler(resp, req) } } contentType := "application/json" @@ -633,46 +597,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc } } -func isV1CatalogRequest(logURL string) bool { - switch { - case strings.HasPrefix(logURL, "/v1/catalog/"), - strings.HasPrefix(logURL, "/v1/health/"), - strings.HasPrefix(logURL, "/v1/config/"): - return true - - case strings.HasPrefix(logURL, "/v1/agent/token/"), - logURL == "/v1/agent/self", - logURL == "/v1/agent/host", - logURL == "/v1/agent/version", - logURL == "/v1/agent/reload", - logURL == "/v1/agent/monitor", - logURL == "/v1/agent/metrics", - logURL == "/v1/agent/metrics/stream", - logURL == "/v1/agent/members", - strings.HasPrefix(logURL, "/v1/agent/join/"), - logURL == "/v1/agent/leave", - strings.HasPrefix(logURL, "/v1/agent/force-leave/"), - logURL == "/v1/agent/connect/authorize", - logURL == "/v1/agent/connect/ca/roots", - strings.HasPrefix(logURL, "/v1/agent/connect/ca/leaf/"): - return false - - case strings.HasPrefix(logURL, "/v1/agent/"): - return true - - case logURL == 
"/v1/internal/acl/authorize", - logURL == "/v1/internal/service-virtual-ip", - logURL == "/v1/internal/ui/oidc-auth-methods", - strings.HasPrefix(logURL, "/v1/internal/ui/metrics-proxy/"): - return false - - case strings.HasPrefix(logURL, "/v1/internal/"): - return true - default: - return false - } -} - // marshalJSON marshals the object into JSON, respecting the user's pretty-ness // configuration. func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) { @@ -681,9 +605,7 @@ func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, if err != nil { return nil, err } - if ok { - buf = append(buf, "\n"...) - } + buf = append(buf, "\n"...) return buf, nil } @@ -1149,20 +1071,6 @@ func (s *HTTPHandlers) parseToken(req *http.Request, token *string) { s.parseTokenWithDefault(req, token) } -func (s *HTTPHandlers) rejectV1RequestWhenV2Enabled() error { - if s.agent.baseDeps.UseV2Resources() { - return newRejectV1RequestWhenV2EnabledError() - } - return nil -} - -func newRejectV1RequestWhenV2EnabledError() error { - return HTTPError{ - StatusCode: http.StatusBadRequest, - Reason: structs.ErrUsingV2CatalogExperiment.Error(), - } -} - func sourceAddrFromRequest(req *http.Request) string { xff := req.Header.Get("X-Forwarded-For") forwardHosts := strings.Split(xff, ",") diff --git a/agent/http_ce.go b/agent/http_ce.go index 0259dc738fb4c..bce44a68498f0 100644 --- a/agent/http_ce.go +++ b/agent/http_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/http_ce_test.go b/agent/http_ce_test.go index ea1d2af61d7a4..bf085ca8c29c7 100644 --- a/agent/http_ce_test.go +++ b/agent/http_ce_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/http_decode_test.go b/agent/http_decode_test.go index 6aece784c0b2d..03d1b9191fa74 100644 --- a/agent/http_decode_test.go +++ b/agent/http_decode_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/http_register.go b/agent/http_register.go index 69820c4d7cfeb..b3f0dfea3f3a6 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -26,9 +26,6 @@ func init() { registerEndpoint("/v1/acl/token", []string{"PUT"}, (*HTTPHandlers).ACLTokenCreate) registerEndpoint("/v1/acl/token/self", []string{"GET"}, (*HTTPHandlers).ACLTokenSelf) registerEndpoint("/v1/acl/token/", []string{"GET", "PUT", "DELETE"}, (*HTTPHandlers).ACLTokenCRUD) - registerEndpoint("/v1/acl/templated-policies", []string{"GET"}, (*HTTPHandlers).ACLTemplatedPoliciesList) - registerEndpoint("/v1/acl/templated-policy/name/", []string{"GET"}, (*HTTPHandlers).ACLTemplatedPolicyRead) - registerEndpoint("/v1/acl/templated-policy/preview/", []string{"POST"}, (*HTTPHandlers).ACLTemplatedPolicyPreview) registerEndpoint("/v1/agent/token/", []string{"PUT"}, (*HTTPHandlers).AgentToken) registerEndpoint("/v1/agent/self", []string{"GET"}, (*HTTPHandlers).AgentSelf) registerEndpoint("/v1/agent/host", []string{"GET"}, (*HTTPHandlers).AgentHost) @@ -86,7 +83,6 @@ func init() { registerEndpoint("/v1/internal/federation-states/mesh-gateways", []string{"GET"}, (*HTTPHandlers).FederationStateListMeshGateways) registerEndpoint("/v1/internal/federation-state/", []string{"GET"}, (*HTTPHandlers).FederationStateGet) registerEndpoint("/v1/discovery-chain/", []string{"GET", "POST"}, (*HTTPHandlers).DiscoveryChainRead) - registerEndpoint("/v1/exported-services", []string{"GET"}, (*HTTPHandlers).ExportedServices) registerEndpoint("/v1/event/fire/", []string{"PUT"}, (*HTTPHandlers).EventFire) registerEndpoint("/v1/event/list", []string{"GET"}, (*HTTPHandlers).EventList) registerEndpoint("/v1/health/node/", []string{"GET"}, (*HTTPHandlers).HealthNodeChecks) diff --git a/agent/http_test.go b/agent/http_test.go index 607061d8681f7..99100c5fbc8e8 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -1628,7 +1628,7 @@ func TestAllowedNets(t *testing.T) { } // assertIndex tests that X-Consul-Index is set and non-zero -func assertIndex(t testutil.TestingTB, resp *httptest.ResponseRecorder) { +func assertIndex(t *testing.T, resp *httptest.ResponseRecorder) { t.Helper() require.NoError(t, checkIndex(resp)) } diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 4f0b188a0cc43..2353c5bdac2ee 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/intentions_endpoint_ce_test.go b/agent/intentions_endpoint_ce_test.go index 10a540bf1869c..fb6a47f5e53d4 100644 --- a/agent/intentions_endpoint_ce_test.go +++ b/agent/intentions_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index b1309feb9d2cd..161b8b5139d54 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/keyring.go b/agent/keyring.go index f30680774b340..3d96880f03aaa 100644 --- a/agent/keyring.go +++ b/agent/keyring.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/keyring_test.go b/agent/keyring_test.go index 1a9332a8a7393..7ce5d2cd4b93b 100644 --- a/agent/keyring_test.go +++ b/agent/keyring_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/kvs_endpoint.go b/agent/kvs_endpoint.go index e60567cd5b807..d5ad8cabc3de8 100644 --- a/agent/kvs_endpoint.go +++ b/agent/kvs_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/kvs_endpoint_test.go b/agent/kvs_endpoint_test.go index 2b3563000815b..6ea5efced20d2 100644 --- a/agent/kvs_endpoint_test.go +++ b/agent/kvs_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/leafcert/cached_roots.go b/agent/leafcert/cached_roots.go index 4b0612416a549..aaf768a2fb8e0 100644 --- a/agent/leafcert/cached_roots.go +++ b/agent/leafcert/cached_roots.go @@ -7,12 +7,13 @@ import ( "context" "errors" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" ) // NewCachedRootsReader returns a RootsReader that sources data from the agent cache. -func NewCachedRootsReader(cache cacheshim.Cache, dc string) RootsReader { +func NewCachedRootsReader(cache *cache.Cache, dc string) RootsReader { return &agentCacheRootsReader{ cache: cache, datacenter: dc, @@ -20,7 +21,7 @@ func NewCachedRootsReader(cache cacheshim.Cache, dc string) RootsReader { } type agentCacheRootsReader struct { - cache cacheshim.Cache + cache *cache.Cache datacenter string } @@ -29,7 +30,7 @@ var _ RootsReader = (*agentCacheRootsReader)(nil) func (r *agentCacheRootsReader) Get() (*structs.IndexedCARoots, error) { // Background is fine here because this isn't a blocking query as no index is set. // Therefore this will just either be a cache hit or return once the non-blocking query returns. 
- rawRoots, _, err := r.cache.Get(context.Background(), cacheshim.ConnectCARootName, &structs.DCSpecificRequest{ + rawRoots, _, err := r.cache.Get(context.Background(), cachetype.ConnectCARootName, &structs.DCSpecificRequest{ Datacenter: r.datacenter, }) if err != nil { @@ -42,8 +43,8 @@ func (r *agentCacheRootsReader) Get() (*structs.IndexedCARoots, error) { return roots, nil } -func (r *agentCacheRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error { - return r.cache.Notify(ctx, cacheshim.ConnectCARootName, &structs.DCSpecificRequest{ +func (r *agentCacheRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error { + return r.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{ Datacenter: r.datacenter, }, correlationID, ch) } diff --git a/agent/leafcert/generate.go b/agent/leafcert/generate.go index 19dbdbbaf4bbb..9551e760b1fc2 100644 --- a/agent/leafcert/generate.go +++ b/agent/leafcert/generate.go @@ -11,6 +11,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" ) @@ -230,15 +231,6 @@ func (m *Manager) generateNewLeaf( var ipAddresses []net.IP switch { - case req.WorkloadIdentity != "": - id = &connect.SpiffeIDWorkloadIdentity{ - TrustDomain: roots.TrustDomain, - Partition: req.TargetPartition(), - Namespace: req.TargetNamespace(), - WorkloadIdentity: req.WorkloadIdentity, - } - dnsNames = append(dnsNames, req.DNSSAN...) - case req.Service != "": id = &connect.SpiffeIDService{ Host: roots.TrustDomain, @@ -281,7 +273,7 @@ func (m *Manager) generateNewLeaf( dnsNames = append(dnsNames, connect.PeeringServerSAN(req.Datacenter, roots.TrustDomain)) default: - return nil, newState, errors.New("URI must be either workload identity, service, agent, server, or kind") + return nil, newState, errors.New("URI must be either service, agent, server, or kind") } // Create a new private key @@ -316,7 +308,7 @@ func (m *Manager) generateNewLeaf( reply, err := m.certSigner.SignCert(context.Background(), &args) if err != nil { - if err.Error() == structs.ErrRateLimited.Error() { + if err.Error() == consul.ErrRateLimited.Error() { if firstTime { // This was a first fetch - we have no good value in cache. In this case // we just return the error to the caller rather than rely on surprising diff --git a/agent/leafcert/leafcert.go b/agent/leafcert/leafcert.go index 34759b6fdc7a7..5b1cd6b9be3bd 100644 --- a/agent/leafcert/leafcert.go +++ b/agent/leafcert/leafcert.go @@ -15,7 +15,7 @@ import ( "golang.org/x/sync/singleflight" "golang.org/x/time/rate" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib/ttlcache" ) @@ -104,7 +104,7 @@ type Deps struct { type RootsReader interface { Get() (*structs.IndexedCARoots, error) - Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error + Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error } type CertSigner interface { @@ -237,7 +237,7 @@ func (m *Manager) Stop() { // index is retrieved, the last known value (maybe nil) is returned. No // error is returned on timeout. This matches the behavior of Consul blocking // queries. 
-func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cacheshim.ResultMeta, error) { +func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cache.ResultMeta, error) { // Lightweight copy this object so that manipulating req doesn't race. dup := *req req = &dup @@ -254,10 +254,10 @@ func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs. return m.internalGet(ctx, req) } -func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cacheshim.ResultMeta, error) { +func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cache.ResultMeta, error) { key := req.Key() if key == "" { - return nil, cacheshim.ResultMeta{}, fmt.Errorf("a key is required") + return nil, cache.ResultMeta{}, fmt.Errorf("a key is required") } if req.MaxQueryTime <= 0 { @@ -310,7 +310,7 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (* } if !shouldReplaceCert { - meta := cacheshim.ResultMeta{ + meta := cache.ResultMeta{ Index: existingIndex, } @@ -347,7 +347,7 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (* // other words valid fetches should reset the error. See // https://github.com/hashicorp/consul/issues/4480. if !first && lastFetchErr != nil { - return existing, cacheshim.ResultMeta{Index: existingIndex}, lastFetchErr + return existing, cache.ResultMeta{Index: existingIndex}, lastFetchErr } notifyCh := m.triggerCertRefreshInGroup(req, cd) @@ -357,14 +357,14 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (* select { case <-ctx.Done(): - return nil, cacheshim.ResultMeta{}, ctx.Err() + return nil, cache.ResultMeta{}, ctx.Err() case <-notifyCh: // Our fetch returned, retry the get from the cache. req.MustRevalidate = false case <-timeoutTimer.C: // Timeout on the cache read, just return whatever we have. - return existing, cacheshim.ResultMeta{Index: existingIndex}, nil + return existing, cache.ResultMeta{Index: existingIndex}, nil } } } diff --git a/agent/leafcert/leafcert_test.go b/agent/leafcert/leafcert_test.go index f23ecfef62f99..0b523523e4736 100644 --- a/agent/leafcert/leafcert_test.go +++ b/agent/leafcert/leafcert_test.go @@ -9,6 +9,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "sync" "sync/atomic" "testing" "time" @@ -16,8 +17,9 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -32,7 +34,7 @@ func TestManager_changingRoots(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) caRoot := signer.UpdateCA(t, nil) @@ -96,7 +98,7 @@ func TestManager_changingRootsJitterBetweenCalls(t *testing.T) { const TestOverrideCAChangeInitialDelay = 100 * time.Millisecond - m, signer := NewTestManager(t, func(cfg *Config) { + m, signer := testManager(t, func(cfg *Config) { // Override the root-change delay so we will timeout first. We can't set it to // a crazy high value otherwise we'll have to wait that long in the test to // see if it actually happens on subsequent calls. 
We instead reduce the @@ -224,7 +226,7 @@ func testObserveLeafCert[T any](m *Manager, req *ConnectCALeafRequest, cb func(* func TestManager_changingRootsBetweenBlockingCalls(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) caRoot := signer.UpdateCA(t, nil) @@ -295,7 +297,7 @@ func TestManager_CSRRateLimiting(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, func(cfg *Config) { + m, signer := testManager(t, func(cfg *Config) { // Each jitter window will be only 100 ms long to make testing quick but // highly likely not to fail based on scheduling issues. cfg.TestOverrideCAChangeInitialDelay = 100 * time.Millisecond @@ -307,13 +309,13 @@ func TestManager_CSRRateLimiting(t *testing.T) { // First call return rate limit error. This is important as it checks // behavior when cache is empty and we have to return a nil Value but need to // save state to do the right thing for retry. - structs.ErrRateLimited, // inc + consul.ErrRateLimited, // inc // Then succeed on second call nil, // Then be rate limited again on several further calls - structs.ErrRateLimited, // inc - structs.ErrRateLimited, // inc - // Then fine after that + consul.ErrRateLimited, // inc + consul.ErrRateLimited, // inc + // Then fine after that ) req := &ConnectCALeafRequest{ @@ -330,7 +332,7 @@ func TestManager_CSRRateLimiting(t *testing.T) { t.Fatal("shouldn't block longer than one jitter window for success") case result := <-getCh: require.Error(t, result.Err) - require.Equal(t, structs.ErrRateLimited.Error(), result.Err.Error()) + require.Equal(t, consul.ErrRateLimited.Error(), result.Err.Error()) } // Second call should return correct cert immediately. @@ -427,7 +429,7 @@ func TestManager_watchRootsDedupingMultipleCallers(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) caRoot := signer.UpdateCA(t, nil) @@ -575,7 +577,7 @@ func TestManager_expiringLeaf(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) caRoot := signer.UpdateCA(t, nil) @@ -635,7 +637,7 @@ func TestManager_expiringLeaf(t *testing.T) { func TestManager_DNSSANForService(t *testing.T) { t.Parallel() - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) _ = signer.UpdateCA(t, nil) @@ -667,7 +669,7 @@ func TestManager_workflow_good(t *testing.T) { const TestOverrideCAChangeInitialDelay = 1 * time.Nanosecond - m, signer := NewTestManager(t, func(cfg *Config) { + m, signer := testManager(t, func(cfg *Config) { cfg.TestOverrideCAChangeInitialDelay = TestOverrideCAChangeInitialDelay }) @@ -709,7 +711,7 @@ func TestManager_workflow_good(t *testing.T) { type reply struct { cert *structs.IssuedCert - meta cacheshim.ResultMeta + meta cache.ResultMeta err error } @@ -816,7 +818,7 @@ func TestManager_workflow_goodNotLocal(t *testing.T) { const TestOverrideCAChangeInitialDelay = 1 * time.Nanosecond - m, signer := NewTestManager(t, func(cfg *Config) { + m, signer := testManager(t, func(cfg *Config) { cfg.TestOverrideCAChangeInitialDelay = TestOverrideCAChangeInitialDelay }) @@ -933,7 +935,7 @@ func TestManager_workflow_nonBlockingQuery_after_blockingQuery_shouldNotBlock(t ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - m, signer := NewTestManager(t, nil) + m, signer := testManager(t, nil) _ = signer.UpdateCA(t, nil) @@ -1018,6 +1020,98 @@ func requireLeafValidUnderCA(t require.TestingT, issued *structs.IssuedCert, ca require.NoError(t, err) } +// testManager 
returns a *Manager that is pre-configured to use a mock RPC +// implementation that can sign certs, and an in-memory CA roots reader that +// interacts well with it. +func testManager(t *testing.T, mut func(*Config)) (*Manager, *testSigner) { + signer := newTestSigner(t, nil, nil) + + deps := Deps{ + Logger: testutil.Logger(t), + RootsReader: signer.RootsReader, + CertSigner: signer, + Config: Config{ + // Override the root-change spread so we don't have to wait up to 20 seconds + // to see root changes work. Can be changed back for specific tests that + // need to test this, Note it's not 0 since that used default but is + // effectively the same. + TestOverrideCAChangeInitialDelay: 1 * time.Microsecond, + }, + } + if mut != nil { + mut(&deps.Config) + } + + m := NewManager(deps) + t.Cleanup(m.Stop) + + return m, signer +} + +type testRootsReader struct { + mu sync.Mutex + index uint64 + roots *structs.IndexedCARoots + watcher chan struct{} +} + +func newTestRootsReader(t *testing.T) *testRootsReader { + r := &testRootsReader{ + watcher: make(chan struct{}), + } + t.Cleanup(func() { + r.mu.Lock() + watcher := r.watcher + r.mu.Unlock() + close(watcher) + }) + return r +} + +var _ RootsReader = (*testRootsReader)(nil) + +func (r *testRootsReader) Set(roots *structs.IndexedCARoots) { + r.mu.Lock() + oldWatcher := r.watcher + r.watcher = make(chan struct{}) + r.roots = roots + if roots == nil { + r.index = 1 + } else { + r.index = roots.Index + } + r.mu.Unlock() + + close(oldWatcher) +} + +func (r *testRootsReader) Get() (*structs.IndexedCARoots, error) { + r.mu.Lock() + defer r.mu.Unlock() + return r.roots, nil +} + +func (r *testRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error { + r.mu.Lock() + watcher := r.watcher + r.mu.Unlock() + + go func() { + <-watcher + + r.mu.Lock() + defer r.mu.Unlock() + + ch <- cache.UpdateEvent{ + CorrelationID: correlationID, + Result: r.roots, + Meta: cache.ResultMeta{Index: r.index}, + Err: nil, + } + }() + return nil +} + type testGetResult struct { Index uint64 Value *structs.IssuedCert diff --git a/agent/leafcert/roots.go b/agent/leafcert/roots.go index 44fc0ff5b06cb..161b0d0a041c2 100644 --- a/agent/leafcert/roots.go +++ b/agent/leafcert/roots.go @@ -8,7 +8,7 @@ import ( "sync" "sync/atomic" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" ) @@ -100,7 +100,7 @@ func (r *rootWatcher) rootWatcher(ctx context.Context) { atomic.AddUint32(&r.testStartCount, 1) defer atomic.AddUint32(&r.testStopCount, 1) - ch := make(chan cacheshim.UpdateEvent, 1) + ch := make(chan cache.UpdateEvent, 1) if err := r.rootsReader.Notify(ctx, "roots", ch); err != nil { // Trigger all inflight watchers. 
We don't pass the error, but they will diff --git a/agent/leafcert/leafcert_test_helpers.go b/agent/leafcert/signer_test.go similarity index 57% rename from agent/leafcert/leafcert_test_helpers.go rename to agent/leafcert/signer_test.go index 0779033dccfd4..ad385f8c72a1c 100644 --- a/agent/leafcert/leafcert_test_helpers.go +++ b/agent/leafcert/signer_test.go @@ -17,42 +17,12 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/cacheshim" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/sdk/testutil" ) -// NewTestManager returns a *Manager that is pre-configured to use a mock RPC -// implementation that can sign certs, and an in-memory CA roots reader that -// interacts well with it. -func NewTestManager(t *testing.T, mut func(*Config)) (*Manager, *TestSigner) { - signer := newTestSigner(t, nil, nil) - - deps := Deps{ - Logger: testutil.Logger(t), - RootsReader: signer.RootsReader, - CertSigner: signer, - Config: Config{ - // Override the root-change spread so we don't have to wait up to 20 seconds - // to see root changes work. Can be changed back for specific tests that - // need to test this, Note it's not 0 since that used default but is - // effectively the same. - TestOverrideCAChangeInitialDelay: 1 * time.Microsecond, - }, - } - if mut != nil { - mut(&deps.Config) - } - - m := NewManager(deps) - t.Cleanup(m.Stop) - - return m, signer -} - -// TestSigner implements NetRPC and handles leaf signing operations -type TestSigner struct { +// testSigner implements NetRPC and handles leaf signing operations +type testSigner struct { caLock sync.Mutex ca *structs.CARoot prevRoots []*structs.CARoot // remember prior ones @@ -66,37 +36,37 @@ type TestSigner struct { signCallCapture []*structs.CASignRequest } -var _ CertSigner = (*TestSigner)(nil) +var _ CertSigner = (*testSigner)(nil) var ReplyWithExpiredCert = errors.New("reply with expired cert") -func newTestSigner(t *testing.T, idGenerator *atomic.Uint64, rootsReader *testRootsReader) *TestSigner { +func newTestSigner(t *testing.T, idGenerator *atomic.Uint64, rootsReader *testRootsReader) *testSigner { if idGenerator == nil { idGenerator = &atomic.Uint64{} } if rootsReader == nil { rootsReader = newTestRootsReader(t) } - s := &TestSigner{ + s := &testSigner{ IDGenerator: idGenerator, RootsReader: rootsReader, } return s } -func (s *TestSigner) SetSignCallErrors(errs ...error) { +func (s *testSigner) SetSignCallErrors(errs ...error) { s.signCallLock.Lock() defer s.signCallLock.Unlock() s.signCallErrors = append(s.signCallErrors, errs...) 
} -func (s *TestSigner) GetSignCallErrorCount() uint64 { +func (s *testSigner) GetSignCallErrorCount() uint64 { s.signCallLock.Lock() defer s.signCallLock.Unlock() return s.signCallErrorCount } -func (s *TestSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot { +func (s *testSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot { if ca == nil { ca = connect.TestCA(t, nil) } @@ -125,17 +95,17 @@ func (s *TestSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot return ca } -func (s *TestSigner) nextIndex() uint64 { +func (s *testSigner) nextIndex() uint64 { return s.IDGenerator.Add(1) } -func (s *TestSigner) getCA() *structs.CARoot { +func (s *testSigner) getCA() *structs.CARoot { s.caLock.Lock() defer s.caLock.Unlock() return s.ca } -func (s *TestSigner) GetCapture(idx int) *structs.CASignRequest { +func (s *testSigner) GetCapture(idx int) *structs.CASignRequest { s.signCallLock.Lock() defer s.signCallLock.Unlock() if len(s.signCallCapture) > idx { @@ -145,7 +115,7 @@ func (s *TestSigner) GetCapture(idx int) *structs.CASignRequest { return nil } -func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (*structs.IssuedCert, error) { +func (s *testSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (*structs.IssuedCert, error) { useExpiredCert := false s.signCallLock.Lock() s.signCallCapture = append(s.signCallCapture, req) @@ -180,17 +150,8 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) ( return nil, fmt.Errorf("error parsing CSR URI: %w", err) } - var isService bool - var serviceID *connect.SpiffeIDService - var workloadID *connect.SpiffeIDWorkloadIdentity - - switch spiffeID.(type) { - case *connect.SpiffeIDService: - isService = true - serviceID = spiffeID.(*connect.SpiffeIDService) - case *connect.SpiffeIDWorkloadIdentity: - workloadID = spiffeID.(*connect.SpiffeIDWorkloadIdentity) - default: + serviceID, isService := spiffeID.(*connect.SpiffeIDService) + if !isService { return nil, fmt.Errorf("unexpected spiffeID type %T", spiffeID) } @@ -270,97 +231,16 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) ( } index := s.nextIndex() - if isService { - // Service Spiffe ID case - return &structs.IssuedCert{ - SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), - CertPEM: leafPEM, - Service: serviceID.Service, - ServiceURI: leafCert.URIs[0].String(), - ValidAfter: leafCert.NotBefore, - ValidBefore: leafCert.NotAfter, - RaftIndex: structs.RaftIndex{ - CreateIndex: index, - ModifyIndex: index, - }, - }, nil - } else { - // Workload identity Spiffe ID case - return &structs.IssuedCert{ - SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), - CertPEM: leafPEM, - WorkloadIdentity: workloadID.WorkloadIdentity, - WorkloadIdentityURI: leafCert.URIs[0].String(), - ValidAfter: leafCert.NotBefore, - ValidBefore: leafCert.NotAfter, - RaftIndex: structs.RaftIndex{ - CreateIndex: index, - ModifyIndex: index, - }, - }, nil - } -} - -type testRootsReader struct { - mu sync.Mutex - index uint64 - roots *structs.IndexedCARoots - watcher chan struct{} -} - -func newTestRootsReader(t *testing.T) *testRootsReader { - r := &testRootsReader{ - watcher: make(chan struct{}), - } - t.Cleanup(func() { - r.mu.Lock() - watcher := r.watcher - r.mu.Unlock() - close(watcher) - }) - return r -} - -var _ RootsReader = (*testRootsReader)(nil) - -func (r *testRootsReader) Set(roots *structs.IndexedCARoots) { - r.mu.Lock() - oldWatcher := r.watcher - r.watcher 
= make(chan struct{}) - r.roots = roots - if roots == nil { - r.index = 1 - } else { - r.index = roots.Index - } - r.mu.Unlock() - - close(oldWatcher) -} - -func (r *testRootsReader) Get() (*structs.IndexedCARoots, error) { - r.mu.Lock() - defer r.mu.Unlock() - return r.roots, nil -} - -func (r *testRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error { - r.mu.Lock() - watcher := r.watcher - r.mu.Unlock() - - go func() { - <-watcher - - r.mu.Lock() - defer r.mu.Unlock() - - ch <- cacheshim.UpdateEvent{ - CorrelationID: correlationID, - Result: r.roots, - Meta: cacheshim.ResultMeta{Index: r.index}, - Err: nil, - } - }() - return nil + return &structs.IssuedCert{ + SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), + CertPEM: leafPEM, + Service: serviceID.Service, + ServiceURI: leafCert.URIs[0].String(), + ValidAfter: leafCert.NotBefore, + ValidBefore: leafCert.NotAfter, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, nil } diff --git a/agent/leafcert/structs.go b/agent/leafcert/structs.go index 685756c8dc8c5..7ad11a0869a52 100644 --- a/agent/leafcert/structs.go +++ b/agent/leafcert/structs.go @@ -11,7 +11,7 @@ import ( "github.com/mitchellh/hashstructure" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" ) @@ -31,27 +31,16 @@ type ConnectCALeafRequest struct { // The following flags indicate the entity we are requesting a cert for. // Only one of these must be specified. - WorkloadIdentity string // Given a WorkloadIdentity name, the request is for a SpiffeIDWorkload. - Service string // Given a Service name, not ID, the request is for a SpiffeIDService. - Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent. - Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported. - Server bool // If true, the request is for a SpiffeIDServer. + Service string // Given a Service name, not ID, the request is for a SpiffeIDService. + Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent. + Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported. + Server bool // If true, the request is for a SpiffeIDServer. 
} func (r *ConnectCALeafRequest) Key() string { r.EnterpriseMeta.Normalize() switch { - case r.WorkloadIdentity != "": - v, err := hashstructure.Hash([]any{ - r.WorkloadIdentity, - r.EnterpriseMeta, - r.DNSSAN, - r.IPSAN, - }, nil) - if err == nil { - return fmt.Sprintf("workloadidentity:%d", v) - } case r.Agent != "": v, err := hashstructure.Hash([]any{ r.Agent, @@ -105,8 +94,8 @@ func (req *ConnectCALeafRequest) TargetPartition() string { return req.PartitionOrDefault() } -func (r *ConnectCALeafRequest) CacheInfo() cacheshim.RequestInfo { - return cacheshim.RequestInfo{ +func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo { + return cache.RequestInfo{ Token: r.Token, Key: r.Key(), Datacenter: r.Datacenter, diff --git a/agent/leafcert/watch.go b/agent/leafcert/watch.go index 93d027b90c124..fe745f916d14b 100644 --- a/agent/leafcert/watch.go +++ b/agent/leafcert/watch.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/hashicorp/consul/agent/cacheshim" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/lib" ) @@ -43,9 +43,9 @@ func (m *Manager) Notify( ctx context.Context, req *ConnectCALeafRequest, correlationID string, - ch chan<- cacheshim.UpdateEvent, + ch chan<- cache.UpdateEvent, ) error { - return m.NotifyCallback(ctx, req, correlationID, func(ctx context.Context, event cacheshim.UpdateEvent) { + return m.NotifyCallback(ctx, req, correlationID, func(ctx context.Context, event cache.UpdateEvent) { select { case ch <- event: case <-ctx.Done(): @@ -60,7 +60,7 @@ func (m *Manager) NotifyCallback( ctx context.Context, req *ConnectCALeafRequest, correlationID string, - cb cacheshim.Callback, + cb cache.Callback, ) error { if req.Key() == "" { return fmt.Errorf("a key is required") @@ -81,7 +81,7 @@ func (m *Manager) notifyBlockingQuery( ctx context.Context, req *ConnectCALeafRequest, correlationID string, - cb cacheshim.Callback, + cb cache.Callback, ) { // Always start at 0 index to deliver the initial (possibly currently cached // value). @@ -106,7 +106,7 @@ func (m *Manager) notifyBlockingQuery( // Check the index of the value returned in the cache entry to be sure it // changed if index == 0 || index < meta.Index { - cb(ctx, cacheshim.UpdateEvent{ + cb(ctx, cache.UpdateEvent{ CorrelationID: correlationID, Result: newValue, Meta: meta, diff --git a/agent/local/state.go b/agent/local/state.go index 67e72aece0b4d..6cd5b0c82a584 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local diff --git a/agent/local/state_internal_test.go b/agent/local/state_internal_test.go index 61fc2c0273a80..ba68e20f287a6 100644 --- a/agent/local/state_internal_test.go +++ b/agent/local/state_internal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local diff --git a/agent/local/state_test.go b/agent/local/state_test.go index ced73201e72bd..4751352ec1c8f 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local_test diff --git a/agent/local/testing.go b/agent/local/testing.go index 5e9ae15ac3763..5303cb6b0c4ac 100644 --- a/agent/local/testing.go +++ b/agent/local/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local diff --git a/agent/log-drop/log-drop.go b/agent/log-drop/log-drop.go index ea08f88d890b0..54bc09c5a8c3b 100644 --- a/agent/log-drop/log-drop.go +++ b/agent/log-drop/log-drop.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package logdrop diff --git a/agent/log-drop/log-drop_test.go b/agent/log-drop/log-drop_test.go index c050a734be4a8..fdb61a059e525 100644 --- a/agent/log-drop/log-drop_test.go +++ b/agent/log-drop/log-drop_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package logdrop diff --git a/agent/metadata/build.go b/agent/metadata/build.go index 76a432d9a380b..b50fa96acc7af 100644 --- a/agent/metadata/build.go +++ b/agent/metadata/build.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metadata diff --git a/agent/metadata/build_test.go b/agent/metadata/build_test.go index 888b9b0210c4b..4688db2e1850f 100644 --- a/agent/metadata/build_test.go +++ b/agent/metadata/build_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metadata diff --git a/agent/metadata/server.go b/agent/metadata/server.go index 2e626787bdff7..64c9936909892 100644 --- a/agent/metadata/server.go +++ b/agent/metadata/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metadata diff --git a/agent/metadata/server_internal_test.go b/agent/metadata/server_internal_test.go index bb0561ab2d193..5f3d47724ee91 100644 --- a/agent/metadata/server_internal_test.go +++ b/agent/metadata/server_internal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metadata diff --git a/agent/metadata/server_test.go b/agent/metadata/server_test.go index 78b16f2599b2f..8ee63fa3b413f 100644 --- a/agent/metadata/server_test.go +++ b/agent/metadata/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metadata_test diff --git a/agent/metrics.go b/agent/metrics.go index 58f9e3c829e10..d9294eb25cb95 100644 --- a/agent/metrics.go +++ b/agent/metrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/metrics/testing.go b/agent/metrics/testing.go index 3663d6834c16d..0fc3455ab5e70 100644 --- a/agent/metrics/testing.go +++ b/agent/metrics/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package metrics diff --git a/agent/metrics_test.go b/agent/metrics_test.go index fa1fc55aa288b..6f9517598e6ad 100644 --- a/agent/metrics_test.go +++ b/agent/metrics_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -43,12 +43,12 @@ func recordPromMetrics(t require.TestingT, a *TestAgent, respRec *httptest.Respo if tt, ok := t.(*testing.T); ok { tt.Helper() } - req, err := http.NewRequest("GET", "/v1/agent/metrics?format=prometheus", nil) require.NoError(t, err, "Failed to generate new http request.") - a.srv.h.ServeHTTP(respRec, req) - require.Equalf(t, 200, respRec.Code, "expected 200, got %d, body: %s", respRec.Code, respRec.Body.String()) + _, err = a.srv.AgentMetrics(respRec, req) + require.NoError(t, err, "Failed to serve agent metrics") + } func assertMetricExists(t *testing.T, respRec *httptest.ResponseRecorder, metric string) { @@ -513,7 +513,6 @@ func TestHTTPHandlers_AgentMetrics_WAL_Prometheus(t *testing.T) { require.Contains(r, out, metricsPrefix+"_raft_wal_stable_sets") require.Contains(r, out, metricsPrefix+"_raft_wal_tail_truncations") }) - }) t.Run("server without WAL enabled emits no WAL metrics", func(t *testing.T) { @@ -605,7 +604,6 @@ func TestHTTPHandlers_AgentMetrics_LogVerifier_Prometheus(t *testing.T) { testretry.Run(t, func(r *testretry.R) { respRec := httptest.NewRecorder() recordPromMetrics(r, a, respRec) - out := respRec.Body.String() require.Contains(r, out, metricsPrefix+"_raft_logstore_verifier_checkpoints_written") require.Contains(r, out, metricsPrefix+"_raft_logstore_verifier_dropped_reports") diff --git a/agent/mock/notify.go b/agent/mock/notify.go index 00dc9a3864a7b..1aa700b31d2d3 100644 --- a/agent/mock/notify.go +++ b/agent/mock/notify.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package mock diff --git a/agent/nodeid.go b/agent/nodeid.go index c2192fac9123c..1e5823aef7c6c 100644 --- a/agent/nodeid.go +++ b/agent/nodeid.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/nodeid_test.go b/agent/nodeid_test.go index 73ce601249db3..d48889bf6d8df 100644 --- a/agent/nodeid_test.go +++ b/agent/nodeid_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/notify.go b/agent/notify.go index 80a150b19413a..eec501f098ada 100644 --- a/agent/notify.go +++ b/agent/notify.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/notify_test.go b/agent/notify_test.go index f256ee319ce7d..fe08800ae2ec8 100644 --- a/agent/notify_test.go +++ b/agent/notify_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/operator_endpoint.go b/agent/operator_endpoint.go index f669c13bd5caa..099f3dcfe4b08 100644 --- a/agent/operator_endpoint.go +++ b/agent/operator_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/operator_endpoint_ce.go b/agent/operator_endpoint_ce.go index 3abe7bc9109b0..cf2ba2a2027d9 100644 --- a/agent/operator_endpoint_ce.go +++ b/agent/operator_endpoint_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/operator_endpoint_ce_test.go b/agent/operator_endpoint_ce_test.go index 7ab918c7cc253..f4de46f9050c9 100644 --- a/agent/operator_endpoint_ce_test.go +++ b/agent/operator_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/operator_endpoint_test.go b/agent/operator_endpoint_test.go index ffe5c1a53abe6..4d90dbb2249ef 100644 --- a/agent/operator_endpoint_test.go +++ b/agent/operator_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/peering_endpoint.go b/agent/peering_endpoint.go index 2d5cab92be2e7..a1fbd009acc60 100644 --- a/agent/peering_endpoint.go +++ b/agent/peering_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/peering_endpoint_ce_test.go b/agent/peering_endpoint_ce_test.go index 57c3d5289e729..b0395ea968398 100644 --- a/agent/peering_endpoint_ce_test.go +++ b/agent/peering_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/peering_endpoint_test.go b/agent/peering_endpoint_test.go index ba3b704b8b5ad..7ec63c1cd73eb 100644 --- a/agent/peering_endpoint_test.go +++ b/agent/peering_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/pool/conn.go b/agent/pool/conn.go index 24d4c2cba4dc9..45a2c09486177 100644 --- a/agent/pool/conn.go +++ b/agent/pool/conn.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package pool diff --git a/agent/pool/peek.go b/agent/pool/peek.go index b631134341d9a..d6557bb23db8c 100644 --- a/agent/pool/peek.go +++ b/agent/pool/peek.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package pool diff --git a/agent/pool/peek_test.go b/agent/pool/peek_test.go index 29fbe9d4ab72e..cb53c421f9c2e 100644 --- a/agent/pool/peek_test.go +++ b/agent/pool/peek_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package pool diff --git a/agent/pool/pool.go b/agent/pool/pool.go index d793dcd4a1dea..dbe71647633f1 100644 --- a/agent/pool/pool.go +++ b/agent/pool/pool.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package pool diff --git a/agent/prepared_query_endpoint.go b/agent/prepared_query_endpoint.go index 8a3f1f038eb0f..8d5f9fdc5313f 100644 --- a/agent/prepared_query_endpoint.go +++ b/agent/prepared_query_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -11,7 +11,6 @@ import ( cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/internal/dnsutil" ) // preparedQueryCreateResponse is used to wrap the query ID. @@ -163,7 +162,7 @@ func (s *HTTPHandlers) preparedQueryExecute(id string, resp http.ResponseWriter, // a query can fail over to a different DC than where the execute request // was sent to. That's why we use the reply's DC and not the one from // the args. - s.agent.TranslateAddresses(reply.Datacenter, reply.Nodes, dnsutil.TranslateAddressAcceptAny) + s.agent.TranslateAddresses(reply.Datacenter, reply.Nodes, TranslateAddressAcceptAny) // Use empty list instead of nil. if reply.Nodes == nil { diff --git a/agent/prepared_query_endpoint_test.go b/agent/prepared_query_endpoint_test.go index 07e4b8e68c70e..f96c43ad8b904 100644 --- a/agent/prepared_query_endpoint_test.go +++ b/agent/prepared_query_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/proxycfg-glue/config_entry.go b/agent/proxycfg-glue/config_entry.go index 3a79e228277d7..cd86d91e1e91f 100644 --- a/agent/proxycfg-glue/config_entry.go +++ b/agent/proxycfg-glue/config_entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/discovery_chain.go b/agent/proxycfg-glue/discovery_chain.go index 518467492d7cc..3b322e6b334f9 100644 --- a/agent/proxycfg-glue/discovery_chain.go +++ b/agent/proxycfg-glue/discovery_chain.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/discovery_chain_test.go b/agent/proxycfg-glue/discovery_chain_test.go index e4156667d36b7..60d48537c684b 100644 --- a/agent/proxycfg-glue/discovery_chain_test.go +++ b/agent/proxycfg-glue/discovery_chain_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/exported_peered_services.go b/agent/proxycfg-glue/exported_peered_services.go index 1b92600451710..8637891f1556b 100644 --- a/agent/proxycfg-glue/exported_peered_services.go +++ b/agent/proxycfg-glue/exported_peered_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/exported_peered_services_test.go b/agent/proxycfg-glue/exported_peered_services_test.go index 91b42323fab8a..a2b99d4d25b5f 100644 --- a/agent/proxycfg-glue/exported_peered_services_test.go +++ b/agent/proxycfg-glue/exported_peered_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/federation_state_list_mesh_gateways.go b/agent/proxycfg-glue/federation_state_list_mesh_gateways.go index c34303552c0a2..f5f32f1c01cba 100644 --- a/agent/proxycfg-glue/federation_state_list_mesh_gateways.go +++ b/agent/proxycfg-glue/federation_state_list_mesh_gateways.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go b/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go index baf477f4340b2..fd73e19aaf698 100644 --- a/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go +++ b/agent/proxycfg-glue/federation_state_list_mesh_gateways_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/gateway_services.go b/agent/proxycfg-glue/gateway_services.go index 555b1d5385359..24f4087eea817 100644 --- a/agent/proxycfg-glue/gateway_services.go +++ b/agent/proxycfg-glue/gateway_services.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/gateway_services_test.go b/agent/proxycfg-glue/gateway_services_test.go index ff89a62c92e45..eb853bd7b49e9 100644 --- a/agent/proxycfg-glue/gateway_services_test.go +++ b/agent/proxycfg-glue/gateway_services_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index b88c7d2a419ea..817e151b9500b 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue @@ -43,7 +43,7 @@ type Store interface { FederationStateList(ws memdb.WatchSet) (uint64, []*structs.FederationState, error) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *acl.EnterpriseMeta) (uint64, structs.GatewayServices, error) IntentionMatchOne(ws memdb.WatchSet, entry structs.IntentionMatchEntry, matchType structs.IntentionMatchType, destinationType structs.IntentionTargetType) (uint64, structs.SimplifiedIntentions, error) - IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision bool, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) + IntentionTopology(ws memdb.WatchSet, target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision, intentionTarget structs.IntentionTargetType) (uint64, structs.ServiceList, error) ReadResolvedServiceConfigEntries(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, upstreamIDs []structs.ServiceID, proxyMode structs.ProxyMode) (uint64, *configentry.ResolvedServiceConfigSet, error) ServiceDiscoveryChain(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, req discoverychain.CompileRequest) (uint64, *structs.CompiledDiscoveryChain, *configentry.DiscoveryChainSet, error) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) diff --git a/agent/proxycfg-glue/health.go b/agent/proxycfg-glue/health.go index 6acf5b7023026..f0808da978ce9 100644 --- a/agent/proxycfg-glue/health.go +++ b/agent/proxycfg-glue/health.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/health_blocking.go b/agent/proxycfg-glue/health_blocking.go index 8ed384f837efa..3504e050aa707 100644 --- a/agent/proxycfg-glue/health_blocking.go +++ b/agent/proxycfg-glue/health_blocking.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/health_test.go b/agent/proxycfg-glue/health_test.go index 821e22a789085..6f5702ca19c87 100644 --- a/agent/proxycfg-glue/health_test.go +++ b/agent/proxycfg-glue/health_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/helpers_test.go b/agent/proxycfg-glue/helpers_test.go index 3c7eb5b7ad153..0d8bd8c9660d0 100644 --- a/agent/proxycfg-glue/helpers_test.go +++ b/agent/proxycfg-glue/helpers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/intention_upstreams.go b/agent/proxycfg-glue/intention_upstreams.go index 846eb2a72b261..07a12c4ddb6c3 100644 --- a/agent/proxycfg-glue/intention_upstreams.go +++ b/agent/proxycfg-glue/intention_upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/consul/watch" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" @@ -34,21 +33,20 @@ func CacheIntentionUpstreamsDestination(c *cache.Cache) proxycfg.IntentionUpstre // ServerIntentionUpstreams satisfies the proxycfg.IntentionUpstreams interface // by sourcing upstreams for the given service, inferred from intentions, from // the server's state store. -func ServerIntentionUpstreams(deps ServerDataSourceDeps, defaultIntentionPolicy string) proxycfg.IntentionUpstreams { - return serverIntentionUpstreams{deps, structs.IntentionTargetService, defaultIntentionPolicy} +func ServerIntentionUpstreams(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams { + return serverIntentionUpstreams{deps, structs.IntentionTargetService} } // ServerIntentionUpstreamsDestination satisfies the proxycfg.IntentionUpstreams // interface by sourcing upstreams for the given destination, inferred from // intentions, from the server's state store. 
-func ServerIntentionUpstreamsDestination(deps ServerDataSourceDeps, defaultIntentionPolicy string) proxycfg.IntentionUpstreams { - return serverIntentionUpstreams{deps, structs.IntentionTargetDestination, defaultIntentionPolicy} +func ServerIntentionUpstreamsDestination(deps ServerDataSourceDeps) proxycfg.IntentionUpstreams { + return serverIntentionUpstreams{deps, structs.IntentionTargetDestination} } type serverIntentionUpstreams struct { - deps ServerDataSourceDeps - target structs.IntentionTargetType - defaultIntentionPolicy string + deps ServerDataSourceDeps + target structs.IntentionTargetType } func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.ServiceSpecificRequest, correlationID string, ch chan<- proxycfg.UpdateEvent) error { @@ -60,10 +58,9 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi if err != nil { return 0, nil, err } + defaultDecision := authz.IntentionDefaultAllow(nil) - defaultAllow := consul.DefaultIntentionAllow(authz, s.defaultIntentionPolicy) - - index, services, err := store.IntentionTopology(ws, target, false, defaultAllow, s.target) + index, services, err := store.IntentionTopology(ws, target, false, defaultDecision, s.target) if err != nil { return 0, nil, err } diff --git a/agent/proxycfg-glue/intention_upstreams_test.go b/agent/proxycfg-glue/intention_upstreams_test.go index b1c77aaf3ae99..3028524eb0f29 100644 --- a/agent/proxycfg-glue/intention_upstreams_test.go +++ b/agent/proxycfg-glue/intention_upstreams_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue @@ -66,7 +66,7 @@ func TestServerIntentionUpstreams(t *testing.T) { dataSource := ServerIntentionUpstreams(ServerDataSourceDeps{ ACLResolver: newStaticResolver(authz), GetStore: func() Store { return store }, - }, "") + }) ch := make(chan proxycfg.UpdateEvent) err := dataSource.Notify(ctx, &structs.ServiceSpecificRequest{ServiceName: serviceName}, "", ch) @@ -84,47 +84,6 @@ func TestServerIntentionUpstreams(t *testing.T) { require.Equal(t, "db", result.Services[0].Name) } -// Variant of TestServerIntentionUpstreams where a default allow intention policy -// returns "db" service as an IntentionUpstream even if there are no explicit -// intentions for "db". 
-func TestServerIntentionUpstreams_DefaultIntentionPolicy(t *testing.T) { - const serviceName = "web" - - var index uint64 - getIndex := func() uint64 { - index++ - return index - } - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - store := state.NewStateStore(nil) - disableLegacyIntentions(t, store) - - require.NoError(t, store.EnsureRegistration(getIndex(), &structs.RegisterRequest{ - Node: "node-1", - Service: &structs.NodeService{ - Service: "db", - }, - })) - - // Ensures that "db" service will not be filtered due to ACLs - authz := policyAuthorizer(t, `service "db" { policy = "read" }`) - - dataSource := ServerIntentionUpstreams(ServerDataSourceDeps{ - ACLResolver: newStaticResolver(authz), - GetStore: func() Store { return store }, - }, "allow") - - ch := make(chan proxycfg.UpdateEvent) - require.NoError(t, dataSource.Notify(ctx, &structs.ServiceSpecificRequest{ServiceName: serviceName}, "", ch)) - - result := getEventResult[*structs.IndexedServiceList](t, ch) - require.Len(t, result.Services, 1) - require.Equal(t, "db", result.Services[0].Name) -} - func disableLegacyIntentions(t *testing.T, store *state.Store) { t.Helper() diff --git a/agent/proxycfg-glue/intentions.go b/agent/proxycfg-glue/intentions.go index f3186c6689ab9..5176054325962 100644 --- a/agent/proxycfg-glue/intentions.go +++ b/agent/proxycfg-glue/intentions.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/intentions_ce.go b/agent/proxycfg-glue/intentions_ce.go index e7efb978dff1c..bd1823adb192d 100644 --- a/agent/proxycfg-glue/intentions_ce.go +++ b/agent/proxycfg-glue/intentions_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfgglue diff --git a/agent/proxycfg-glue/intentions_test.go b/agent/proxycfg-glue/intentions_test.go index 0e1ab10918865..07d3a8067e34b 100644 --- a/agent/proxycfg-glue/intentions_test.go +++ b/agent/proxycfg-glue/intentions_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/internal_service_dump.go b/agent/proxycfg-glue/internal_service_dump.go index d1c701083d520..e41dc020b1d04 100644 --- a/agent/proxycfg-glue/internal_service_dump.go +++ b/agent/proxycfg-glue/internal_service_dump.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/internal_service_dump_test.go b/agent/proxycfg-glue/internal_service_dump_test.go index 1eba4c043828c..a6e6c3b028607 100644 --- a/agent/proxycfg-glue/internal_service_dump_test.go +++ b/agent/proxycfg-glue/internal_service_dump_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/leafcerts.go b/agent/proxycfg-glue/leafcerts.go index b805586a154a3..24631ffc31134 100644 --- a/agent/proxycfg-glue/leafcerts.go +++ b/agent/proxycfg-glue/leafcerts.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/peered_upstreams.go b/agent/proxycfg-glue/peered_upstreams.go index f345c26df572e..df38b3f0daf4d 100644 --- a/agent/proxycfg-glue/peered_upstreams.go +++ b/agent/proxycfg-glue/peered_upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/peered_upstreams_test.go b/agent/proxycfg-glue/peered_upstreams_test.go index 026fd67a3455f..b0e7c0d8f83c4 100644 --- a/agent/proxycfg-glue/peered_upstreams_test.go +++ b/agent/proxycfg-glue/peered_upstreams_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/peering_list.go b/agent/proxycfg-glue/peering_list.go index 6e7a78c707f15..219bf9b955298 100644 --- a/agent/proxycfg-glue/peering_list.go +++ b/agent/proxycfg-glue/peering_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/peering_list_test.go b/agent/proxycfg-glue/peering_list_test.go index 575d161b4e5d3..f570dbbcc2a86 100644 --- a/agent/proxycfg-glue/peering_list_test.go +++ b/agent/proxycfg-glue/peering_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/resolved_service_config.go b/agent/proxycfg-glue/resolved_service_config.go index 11654cd767b2e..89611bbc07114 100644 --- a/agent/proxycfg-glue/resolved_service_config.go +++ b/agent/proxycfg-glue/resolved_service_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/resolved_service_config_test.go b/agent/proxycfg-glue/resolved_service_config_test.go index 248ab4eab363f..60d39eec205f0 100644 --- a/agent/proxycfg-glue/resolved_service_config_test.go +++ b/agent/proxycfg-glue/resolved_service_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/service_http_checks.go b/agent/proxycfg-glue/service_http_checks.go index 2d0a9dfcff51f..45521f712ae8b 100644 --- a/agent/proxycfg-glue/service_http_checks.go +++ b/agent/proxycfg-glue/service_http_checks.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/service_http_checks_test.go b/agent/proxycfg-glue/service_http_checks_test.go index 87bdfc7abe609..cfe28c7e89f9f 100644 --- a/agent/proxycfg-glue/service_http_checks_test.go +++ b/agent/proxycfg-glue/service_http_checks_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/service_list.go b/agent/proxycfg-glue/service_list.go index f4a9380df715a..418103aef6fed 100644 --- a/agent/proxycfg-glue/service_list.go +++ b/agent/proxycfg-glue/service_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/service_list_test.go b/agent/proxycfg-glue/service_list_test.go index c6372aaf4ea1d..154c1300a3275 100644 --- a/agent/proxycfg-glue/service_list_test.go +++ b/agent/proxycfg-glue/service_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/trust_bundle.go b/agent/proxycfg-glue/trust_bundle.go index f623d2a5555c9..108e7ea9f9aec 100644 --- a/agent/proxycfg-glue/trust_bundle.go +++ b/agent/proxycfg-glue/trust_bundle.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-glue/trust_bundle_test.go b/agent/proxycfg-glue/trust_bundle_test.go index e2082e3d240be..da77e32a56e86 100644 --- a/agent/proxycfg-glue/trust_bundle_test.go +++ b/agent/proxycfg-glue/trust_bundle_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfgglue diff --git a/agent/proxycfg-sources/catalog/config_source.go b/agent/proxycfg-sources/catalog/config_source.go index 9ded9aa7fd4ed..3fbca88de5087 100644 --- a/agent/proxycfg-sources/catalog/config_source.go +++ b/agent/proxycfg-sources/catalog/config_source.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package catalog @@ -17,8 +17,6 @@ import ( "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - "github.com/hashicorp/consul/proto-public/pbresource" ) const source proxycfg.ProxySource = "catalog" @@ -50,13 +48,11 @@ func NewConfigSource(cfg Config) *ConfigSource { // Watch wraps the underlying proxycfg.Manager and dynamically registers // services from the catalog with it when requested by the xDS server. -func (m *ConfigSource) Watch(id *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxysnapshot.CancelFunc, error) { - // Create service ID - serviceID := structs.NewServiceID(id.Name, GetEnterpriseMetaFromResourceID(id)) +func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.CancelFunc, error) { // If the service is registered to the local agent, use the LocalConfigSource // rather than trying to configure it from the catalog. if nodeName == m.NodeName && m.LocalState.ServiceExists(serviceID) { - return m.LocalConfigSource.Watch(id, nodeName, token) + return m.LocalConfigSource.Watch(serviceID, nodeName, token) } // Begin a session with the xDS session concurrency limiter. 
@@ -280,7 +276,7 @@ type Config struct { //go:generate mockery --name ConfigManager --inpackage type ConfigManager interface { - Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) + Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, proxycfg.CancelFunc) Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error Deregister(proxyID proxycfg.ProxyID, source proxycfg.ProxySource) } @@ -293,11 +289,10 @@ type Store interface { //go:generate mockery --name Watcher --inpackage type Watcher interface { - Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxysnapshot.CancelFunc, error) + Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.CancelFunc, error) } //go:generate mockery --name SessionLimiter --inpackage type SessionLimiter interface { BeginSession() (limiter.Session, error) - Run(ctx context.Context) } diff --git a/agent/proxycfg-sources/catalog/config_source_oss.go b/agent/proxycfg-sources/catalog/config_source_oss.go deleted file mode 100644 index 233ad64cee8fa..0000000000000 --- a/agent/proxycfg-sources/catalog/config_source_oss.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package catalog - -import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -func GetEnterpriseMetaFromResourceID(id *pbresource.ID) *acl.EnterpriseMeta { - return acl.DefaultEnterpriseMeta() -} diff --git a/agent/proxycfg-sources/catalog/config_source_test.go b/agent/proxycfg-sources/catalog/config_source_test.go index 94d939e8cad5e..661fae9c082b1 100644 --- a/agent/proxycfg-sources/catalog/config_source_test.go +++ b/agent/proxycfg-sources/catalog/config_source_test.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package catalog import ( - "context" "errors" "testing" "time" @@ -20,9 +19,6 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" ) func TestConfigSource_Success(t *testing.T) { @@ -79,15 +75,15 @@ func TestConfigSource_Success(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - snapCh, termCh, cancelWatch1, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) + snapCh, termCh, cancelWatch1, err := mgr.Watch(serviceID, nodeName, token) require.NoError(t, err) require.Equal(t, session1TermCh, termCh) // Expect Register to have been called with the proxy's inital port. select { case snap := <-snapCh: - require.Equal(t, 9999, snap.(*proxycfg.ConfigSnapshot).Port) - require.Equal(t, token, snap.(*proxycfg.ConfigSnapshot).ProxyID.Token) + require.Equal(t, 9999, snap.Port) + require.Equal(t, token, snap.ProxyID.Token) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } @@ -111,7 +107,7 @@ func TestConfigSource_Success(t *testing.T) { // Expect Register to have been called again with the proxy's new port. 
select { case snap := <-snapCh: - require.Equal(t, 8888, snap.(*proxycfg.ConfigSnapshot).Port) + require.Equal(t, 8888, snap.Port) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } @@ -130,13 +126,13 @@ func TestConfigSource_Success(t *testing.T) { require.Equal(t, map[string]any{ "local_connect_timeout_ms": 123, "max_inbound_connections": 321, - }, snap.(*proxycfg.ConfigSnapshot).Proxy.Config) + }, snap.Proxy.Config) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } // Start another watch. - _, termCh2, cancelWatch2, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) + _, termCh2, cancelWatch2, err := mgr.Watch(serviceID, nodeName, token) require.NoError(t, err) require.Equal(t, session2TermCh, termCh2) @@ -170,7 +166,6 @@ func TestConfigSource_Success(t *testing.T) { func TestConfigSource_LocallyManagedService(t *testing.T) { serviceID := structs.NewServiceID("web-sidecar-proxy-1", nil) - proxyID := rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID() nodeName := "node-1" token := "token" @@ -178,8 +173,8 @@ func TestConfigSource_LocallyManagedService(t *testing.T) { localState.AddServiceWithChecks(&structs.NodeService{ID: serviceID.ID}, nil, "", false) localWatcher := NewMockWatcher(t) - localWatcher.On("Watch", proxyID, nodeName, token). - Return(make(<-chan proxysnapshot.ProxySnapshot), nil, proxysnapshot.CancelFunc(func() {}), nil) + localWatcher.On("Watch", serviceID, nodeName, token). + Return(make(<-chan *proxycfg.ConfigSnapshot), nil, proxycfg.CancelFunc(func() {}), nil) mgr := NewConfigSource(Config{ NodeName: nodeName, @@ -191,7 +186,7 @@ func TestConfigSource_LocallyManagedService(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - _, _, _, err := mgr.Watch(proxyID, nodeName, token) + _, _, _, err := mgr.Watch(serviceID, nodeName, token) require.NoError(t, err) } @@ -213,12 +208,12 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) { })) var canceledWatch bool - cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true }) + cancel := proxycfg.CancelFunc(func() { canceledWatch = true }) cfgMgr := NewMockConfigManager(t) cfgMgr.On("Watch", mock.Anything). - Return(make(<-chan proxysnapshot.ProxySnapshot), cancel) + Return(make(<-chan *proxycfg.ConfigSnapshot), cancel) cfgMgr.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(errors.New("KABOOM")) @@ -238,7 +233,7 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) + _, _, _, err := mgr.Watch(serviceID, nodeName, token) require.Error(t, err) require.True(t, canceledWatch, "watch should've been canceled") @@ -263,12 +258,12 @@ func TestConfigSource_NotProxyService(t *testing.T) { })) var canceledWatch bool - cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true }) + cancel := proxycfg.CancelFunc(func() { canceledWatch = true }) cfgMgr := NewMockConfigManager(t) cfgMgr.On("Watch", mock.Anything). 
- Return(make(<-chan proxysnapshot.ProxySnapshot), cancel) + Return(make(<-chan *proxycfg.ConfigSnapshot), cancel) mgr := NewConfigSource(Config{ Manager: cfgMgr, @@ -279,7 +274,7 @@ func TestConfigSource_NotProxyService(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) + _, _, _, err := mgr.Watch(serviceID, nodeName, token) require.Error(t, err) require.Contains(t, err.Error(), "must be a sidecar proxy or gateway") require.True(t, canceledWatch, "watch should've been canceled") @@ -296,7 +291,7 @@ func TestConfigSource_SessionLimiterError(t *testing.T) { t.Cleanup(src.Shutdown) _, _, _, err := src.Watch( - rtest.Resource(pbmesh.ProxyConfigurationType, "web-sidecar-proxy-1").ID(), + structs.NewServiceID("web-sidecar-proxy-1", nil), "node-name", "token", ) @@ -314,9 +309,9 @@ func testConfigManager(t *testing.T, serviceID structs.ServiceID, nodeName strin Token: token, } - snapCh := make(chan proxysnapshot.ProxySnapshot, 1) + snapCh := make(chan *proxycfg.ConfigSnapshot, 1) cfgMgr.On("Watch", proxyID). - Return((<-chan proxysnapshot.ProxySnapshot)(snapCh), proxysnapshot.CancelFunc(func() {}), nil) + Return((<-chan *proxycfg.ConfigSnapshot)(snapCh), proxycfg.CancelFunc(func() {}), nil) cfgMgr.On("Register", mock.Anything, mock.Anything, source, token, false). Run(func(args mock.Arguments) { @@ -360,8 +355,6 @@ func (nullSessionLimiter) BeginSession() (limiter.Session, error) { return nullSession{}, nil } -func (nullSessionLimiter) Run(ctx context.Context) {} - type nullSession struct{} func (nullSession) End() {} diff --git a/agent/proxycfg-sources/catalog/mock_ConfigManager.go b/agent/proxycfg-sources/catalog/mock_ConfigManager.go index 37deffb022d8e..3ae51c5f6a95f 100644 --- a/agent/proxycfg-sources/catalog/mock_ConfigManager.go +++ b/agent/proxycfg-sources/catalog/mock_ConfigManager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.33.1. DO NOT EDIT. +// Code generated by mockery v2.15.0. DO NOT EDIT. 
package catalog @@ -6,8 +6,6 @@ import ( proxycfg "github.com/hashicorp/consul/agent/proxycfg" mock "github.com/stretchr/testify/mock" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - structs "github.com/hashicorp/consul/agent/structs" ) @@ -36,39 +34,37 @@ func (_m *MockConfigManager) Register(proxyID proxycfg.ProxyID, service *structs } // Watch provides a mock function with given fields: req -func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { +func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, proxycfg.CancelFunc) { ret := _m.Called(req) - var r0 <-chan proxysnapshot.ProxySnapshot - var r1 proxysnapshot.CancelFunc - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok { - return rf(req) - } - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok { + var r0 <-chan *proxycfg.ConfigSnapshot + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok { r0 = rf(req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) + r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) } } - if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok { + var r1 proxycfg.CancelFunc + if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxycfg.CancelFunc); ok { r1 = rf(req) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(proxysnapshot.CancelFunc) + r1 = ret.Get(1).(proxycfg.CancelFunc) } } return r0, r1 } -// NewMockConfigManager creates a new instance of MockConfigManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockConfigManager(t interface { +type mockConstructorTestingTNewMockConfigManager interface { mock.TestingT Cleanup(func()) -}) *MockConfigManager { +} + +// NewMockConfigManager creates a new instance of MockConfigManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockConfigManager(t mockConstructorTestingTNewMockConfigManager) *MockConfigManager { mock := &MockConfigManager{} mock.Mock.Test(t) diff --git a/agent/proxycfg-sources/catalog/mock_SessionLimiter.go b/agent/proxycfg-sources/catalog/mock_SessionLimiter.go index 39cd430f06d3d..3b7147cb064c6 100644 --- a/agent/proxycfg-sources/catalog/mock_SessionLimiter.go +++ b/agent/proxycfg-sources/catalog/mock_SessionLimiter.go @@ -1,10 +1,8 @@ -// Code generated by mockery v2.33.1. DO NOT EDIT. +// Code generated by mockery v2.15.0. DO NOT EDIT. 
package catalog import ( - context "context" - limiter "github.com/hashicorp/consul/agent/grpc-external/limiter" mock "github.com/stretchr/testify/mock" ) @@ -19,10 +17,6 @@ func (_m *MockSessionLimiter) BeginSession() (limiter.Session, error) { ret := _m.Called() var r0 limiter.Session - var r1 error - if rf, ok := ret.Get(0).(func() (limiter.Session, error)); ok { - return rf() - } if rf, ok := ret.Get(0).(func() limiter.Session); ok { r0 = rf() } else { @@ -31,6 +25,7 @@ func (_m *MockSessionLimiter) BeginSession() (limiter.Session, error) { } } + var r1 error if rf, ok := ret.Get(1).(func() error); ok { r1 = rf() } else { @@ -40,17 +35,13 @@ func (_m *MockSessionLimiter) BeginSession() (limiter.Session, error) { return r0, r1 } -// Run provides a mock function with given fields: ctx -func (_m *MockSessionLimiter) Run(ctx context.Context) { - _m.Called(ctx) +type mockConstructorTestingTNewMockSessionLimiter interface { + mock.TestingT + Cleanup(func()) } // NewMockSessionLimiter creates a new instance of MockSessionLimiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockSessionLimiter(t interface { - mock.TestingT - Cleanup(func()) -}) *MockSessionLimiter { +func NewMockSessionLimiter(t mockConstructorTestingTNewMockSessionLimiter) *MockSessionLimiter { mock := &MockSessionLimiter{} mock.Mock.Test(t) diff --git a/agent/proxycfg-sources/catalog/mock_Watcher.go b/agent/proxycfg-sources/catalog/mock_Watcher.go index b77be5d98ea8f..d5ca046a40602 100644 --- a/agent/proxycfg-sources/catalog/mock_Watcher.go +++ b/agent/proxycfg-sources/catalog/mock_Watcher.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.33.1. DO NOT EDIT. +// Code generated by mockery v2.15.0. DO NOT EDIT. 
package catalog @@ -6,9 +6,9 @@ import ( limiter "github.com/hashicorp/consul/agent/grpc-external/limiter" mock "github.com/stretchr/testify/mock" - pbresource "github.com/hashicorp/consul/proto-public/pbresource" + proxycfg "github.com/hashicorp/consul/agent/proxycfg" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + structs "github.com/hashicorp/consul/agent/structs" ) // MockWatcher is an autogenerated mock type for the Watcher type @@ -17,25 +17,20 @@ type MockWatcher struct { } // Watch provides a mock function with given fields: proxyID, nodeName, token -func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxysnapshot.CancelFunc, error) { +func (_m *MockWatcher) Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.CancelFunc, error) { ret := _m.Called(proxyID, nodeName, token) - var r0 <-chan proxysnapshot.ProxySnapshot - var r1 limiter.SessionTerminatedChan - var r2 proxysnapshot.CancelFunc - var r3 error - if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxysnapshot.CancelFunc, error)); ok { - return rf(proxyID, nodeName, token) - } - if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) <-chan proxysnapshot.ProxySnapshot); ok { + var r0 <-chan *proxycfg.ConfigSnapshot + if rf, ok := ret.Get(0).(func(structs.ServiceID, string, string) <-chan *proxycfg.ConfigSnapshot); ok { r0 = rf(proxyID, nodeName, token) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) + r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) } } - if rf, ok := ret.Get(1).(func(*pbresource.ID, string, string) limiter.SessionTerminatedChan); ok { + var r1 limiter.SessionTerminatedChan + if rf, ok := ret.Get(1).(func(structs.ServiceID, string, string) limiter.SessionTerminatedChan); ok { r1 = rf(proxyID, nodeName, token) } else { if ret.Get(1) != nil { @@ -43,15 +38,17 @@ func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token stri } } - if rf, ok := ret.Get(2).(func(*pbresource.ID, string, string) proxysnapshot.CancelFunc); ok { + var r2 proxycfg.CancelFunc + if rf, ok := ret.Get(2).(func(structs.ServiceID, string, string) proxycfg.CancelFunc); ok { r2 = rf(proxyID, nodeName, token) } else { if ret.Get(2) != nil { - r2 = ret.Get(2).(proxysnapshot.CancelFunc) + r2 = ret.Get(2).(proxycfg.CancelFunc) } } - if rf, ok := ret.Get(3).(func(*pbresource.ID, string, string) error); ok { + var r3 error + if rf, ok := ret.Get(3).(func(structs.ServiceID, string, string) error); ok { r3 = rf(proxyID, nodeName, token) } else { r3 = ret.Error(3) @@ -60,12 +57,13 @@ func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token stri return r0, r1, r2, r3 } -// NewMockWatcher creates a new instance of MockWatcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockWatcher(t interface { +type mockConstructorTestingTNewMockWatcher interface { mock.TestingT Cleanup(func()) -}) *MockWatcher { +} + +// NewMockWatcher creates a new instance of MockWatcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMockWatcher(t mockConstructorTestingTNewMockWatcher) *MockWatcher { mock := &MockWatcher{} mock.Mock.Test(t) diff --git a/agent/proxycfg-sources/local/config_source.go b/agent/proxycfg-sources/local/config_source.go index 7b3a835fb819d..18b8a045c421b 100644 --- a/agent/proxycfg-sources/local/config_source.go +++ b/agent/proxycfg-sources/local/config_source.go @@ -1,15 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local import ( "github.com/hashicorp/consul/agent/grpc-external/limiter" "github.com/hashicorp/consul/agent/proxycfg" - "github.com/hashicorp/consul/agent/proxycfg-sources/catalog" structs "github.com/hashicorp/consul/agent/structs" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - "github.com/hashicorp/consul/proto-public/pbresource" ) // ConfigSource wraps a proxycfg.Manager to create watches on services @@ -23,9 +20,7 @@ func NewConfigSource(cfgMgr ConfigManager) *ConfigSource { return &ConfigSource{cfgMgr} } -func (m *ConfigSource) Watch(proxyID *pbresource.ID, nodeName string, _ string) (<-chan proxysnapshot.ProxySnapshot, - limiter.SessionTerminatedChan, proxysnapshot.CancelFunc, error) { - serviceID := structs.NewServiceID(proxyID.Name, catalog.GetEnterpriseMetaFromResourceID(proxyID)) +func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, _ string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.CancelFunc, error) { watchCh, cancelWatch := m.manager.Watch(proxycfg.ProxyID{ ServiceID: serviceID, NodeName: nodeName, diff --git a/agent/proxycfg-sources/local/local.go b/agent/proxycfg-sources/local/local.go index 44867eb067515..92eefe1eb85f0 100644 --- a/agent/proxycfg-sources/local/local.go +++ b/agent/proxycfg-sources/local/local.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package local integrates the proxycfg Manager with the agent's local state. package local diff --git a/agent/proxycfg-sources/local/mock_ConfigManager.go b/agent/proxycfg-sources/local/mock_ConfigManager.go index e3b2d3a445872..8f2c8fc6c836c 100644 --- a/agent/proxycfg-sources/local/mock_ConfigManager.go +++ b/agent/proxycfg-sources/local/mock_ConfigManager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.33.1. DO NOT EDIT. +// Code generated by mockery v2.15.0. DO NOT EDIT. 
package local @@ -6,8 +6,6 @@ import ( proxycfg "github.com/hashicorp/consul/agent/proxycfg" mock "github.com/stretchr/testify/mock" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - structs "github.com/hashicorp/consul/agent/structs" ) @@ -52,39 +50,37 @@ func (_m *MockConfigManager) RegisteredProxies(source proxycfg.ProxySource) []pr } // Watch provides a mock function with given fields: id -func (_m *MockConfigManager) Watch(id proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { +func (_m *MockConfigManager) Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, proxycfg.CancelFunc) { ret := _m.Called(id) - var r0 <-chan proxysnapshot.ProxySnapshot - var r1 proxysnapshot.CancelFunc - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok { - return rf(id) - } - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok { + var r0 <-chan *proxycfg.ConfigSnapshot + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok { r0 = rf(id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) + r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) } } - if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok { + var r1 proxycfg.CancelFunc + if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxycfg.CancelFunc); ok { r1 = rf(id) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(proxysnapshot.CancelFunc) + r1 = ret.Get(1).(proxycfg.CancelFunc) } } return r0, r1 } -// NewMockConfigManager creates a new instance of MockConfigManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockConfigManager(t interface { +type mockConstructorTestingTNewMockConfigManager interface { mock.TestingT Cleanup(func()) -}) *MockConfigManager { +} + +// NewMockConfigManager creates a new instance of MockConfigManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockConfigManager(t mockConstructorTestingTNewMockConfigManager) *MockConfigManager { mock := &MockConfigManager{} mock.Mock.Test(t) diff --git a/agent/proxycfg-sources/local/sync.go b/agent/proxycfg-sources/local/sync.go index 54d95e6594f24..86427c9f005ba 100644 --- a/agent/proxycfg-sources/local/sync.go +++ b/agent/proxycfg-sources/local/sync.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local @@ -7,8 +7,6 @@ import ( "context" "time" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/agent/local" @@ -36,9 +34,6 @@ type SyncConfig struct { // NodeName is the name of the local agent node. NodeName string - // NodeLocality - NodeLocality *structs.Locality - // Logger will be used to write log messages. Logger hclog.Logger @@ -114,14 +109,6 @@ func sync(cfg SyncConfig) { Token: "", } - // We inherit the node's locality at runtime (not persisted). - // The service locality takes precedence if it was set directly during - // registration. - svc = svc.DeepCopy() - if svc.Locality == nil { - svc.Locality = cfg.NodeLocality - } - // TODO(banks): need to work out when to default some stuff. 
For example // Proxy.LocalServicePort is practically necessary for any sidecar and can // default to the port of the sidecar service, but only if it's already @@ -148,7 +135,7 @@ func sync(cfg SyncConfig) { //go:generate mockery --name ConfigManager --inpackage type ConfigManager interface { - Watch(id proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) + Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, proxycfg.CancelFunc) Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error Deregister(proxyID proxycfg.ProxyID, source proxycfg.ProxySource) RegisteredProxies(source proxycfg.ProxySource) []proxycfg.ProxyID diff --git a/agent/proxycfg-sources/local/sync_test.go b/agent/proxycfg-sources/local/sync_test.go index b20787140df57..8fa4883518197 100644 --- a/agent/proxycfg-sources/local/sync_test.go +++ b/agent/proxycfg-sources/local/sync_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package local @@ -72,12 +72,8 @@ func TestSync(t *testing.T) { go Sync(ctx, SyncConfig{ Manager: cfgMgr, State: state, - NodeLocality: &structs.Locality{ - Region: "some-region", - Zone: "some-zone", - }, - Tokens: tokens, - Logger: hclog.NewNullLogger(), + Tokens: tokens, + Logger: hclog.NewNullLogger(), }) // Expect the service in the local state to be registered. @@ -111,13 +107,6 @@ func TestSync(t *testing.T) { select { case reg := <-registerCh: require.Equal(t, serviceID, reg.service.ID) - require.Equal(t, - &structs.Locality{ - Region: "some-region", - Zone: "some-zone", - }, - reg.service.Locality, - ) require.Equal(t, userToken, reg.token) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for service to be registered") diff --git a/agent/proxycfg/api_gateway.go b/agent/proxycfg/api_gateway.go index 43798239a3535..b4954cd3973c7 100644 --- a/agent/proxycfg/api_gateway.go +++ b/agent/proxycfg/api_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -54,11 +54,6 @@ func (h *handlerAPIGateway) initialize(ctx context.Context) (ConfigSnapshot, err return snap, err } - err = watchJWTProviders(ctx, h) - if err != nil { - return snap, err - } - snap.APIGateway.Listeners = make(map[string]structs.APIGatewayListener) snap.APIGateway.BoundListeners = make(map[string]structs.BoundAPIGatewayListener) snap.APIGateway.HTTPRoutes = watch.NewMap[structs.ResourceReference, *structs.HTTPRouteConfigEntry]() @@ -102,33 +97,27 @@ func (h *handlerAPIGateway) handleUpdate(ctx context.Context, u UpdateEvent, sna return fmt.Errorf("error filling agent cache: %v", u.Err) } - switch u.CorrelationID { - case rootsWatchID: + switch { + case u.CorrelationID == rootsWatchID: // Handle change in the CA roots if err := h.handleRootCAUpdate(u, snap); err != nil { return err } - case apiGatewayConfigWatchID, boundGatewayConfigWatchID: + case u.CorrelationID == apiGatewayConfigWatchID || u.CorrelationID == boundGatewayConfigWatchID: // Handle change in the api-gateway or bound-api-gateway config entry if err := h.handleGatewayConfigUpdate(ctx, u, snap, u.CorrelationID); err != nil { return err } - case inlineCertificateConfigWatchID: + case u.CorrelationID == inlineCertificateConfigWatchID: // Handle change in an attached inline-certificate config entry if err := h.handleInlineCertConfigUpdate(ctx, u, snap); err != nil { return err } - case routeConfigWatchID: + case u.CorrelationID == routeConfigWatchID: // Handle change in an attached http-route or tcp-route config entry if err := h.handleRouteConfigUpdate(ctx, u, snap); err != nil { return err } - case jwtProviderID: - err := setJWTProvider(u, snap) - if err != nil { - return err - } - default: if err := (*handlerUpstreams)(h).handleUpdateUpstreams(ctx, u, snap); err != nil { return err diff --git a/agent/proxycfg/api_gateway_ce.go b/agent/proxycfg/api_gateway_ce.go deleted file mode 100644 index c9ef2383979ef..0000000000000 --- a/agent/proxycfg/api_gateway_ce.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package proxycfg - -import "context" - -func watchJWTProviders(cxt context.Context, h *handlerAPIGateway) error { - return nil -} - -func setJWTProvider(u UpdateEvent, snap *ConfigSnapshot) error { - return nil -} diff --git a/agent/proxycfg/config_snapshot_glue.go b/agent/proxycfg/config_snapshot_glue.go deleted file mode 100644 index 6355e0595ec69..0000000000000 --- a/agent/proxycfg/config_snapshot_glue.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package proxycfg - -import ( - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// The below functions are added to ConfigSnapshot to allow it to conform to -// the ProxySnapshot interface. -func (s *ConfigSnapshot) AllowEmptyListeners() bool { - // Ingress and API gateways are allowed to inform LDS of no listeners. - return s.Kind == structs.ServiceKindIngressGateway || - s.Kind == structs.ServiceKindAPIGateway -} - -func (s *ConfigSnapshot) AllowEmptyRoutes() bool { - // Ingress and API gateways are allowed to inform RDS of no routes. 
- return s.Kind == structs.ServiceKindIngressGateway || - s.Kind == structs.ServiceKindAPIGateway -} - -func (s *ConfigSnapshot) AllowEmptyClusters() bool { - // Mesh, Ingress, API and Terminating gateways are allowed to inform CDS of no clusters. - return s.Kind == structs.ServiceKindMeshGateway || - s.Kind == structs.ServiceKindTerminatingGateway || - s.Kind == structs.ServiceKindIngressGateway || - s.Kind == structs.ServiceKindAPIGateway -} - -func (s *ConfigSnapshot) Authorize(authz acl.Authorizer) error { - var authzContext acl.AuthorizerContext - switch s.Kind { - case structs.ServiceKindConnectProxy: - s.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext) - if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(s.Proxy.DestinationServiceName, &authzContext); err != nil { - return status.Errorf(codes.PermissionDenied, err.Error()) - } - case structs.ServiceKindMeshGateway, structs.ServiceKindTerminatingGateway, structs.ServiceKindIngressGateway, structs.ServiceKindAPIGateway: - s.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext) - if err := authz.ToAllowAuthorizer().ServiceWriteAllowed(s.Service, &authzContext); err != nil { - return status.Errorf(codes.PermissionDenied, err.Error()) - } - default: - return status.Errorf(codes.Internal, "Invalid service kind") - } - - // Authed OK! - return nil -} - -func (s *ConfigSnapshot) LoggerName() string { - switch s.Kind { - case structs.ServiceKindConnectProxy: - case structs.ServiceKindTerminatingGateway: - return logging.TerminatingGateway - case structs.ServiceKindMeshGateway: - return logging.MeshGateway - case structs.ServiceKindIngressGateway: - return logging.IngressGateway - } - - return "" -} diff --git a/agent/proxycfg/config_snapshot_glue_test.go b/agent/proxycfg/config_snapshot_glue_test.go deleted file mode 100644 index ed7a9afc1ffbe..0000000000000 --- a/agent/proxycfg/config_snapshot_glue_test.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package proxycfg - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/structs" -) - -func TestConfigSnapshot_AllowEmptyClusters(t *testing.T) { - type testCase struct { - description string - cfgSnapshot *ConfigSnapshot - expectedResult bool - } - testsCases := []testCase{ - { - description: "Mesh proxies are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindConnectProxy}, - expectedResult: false, - }, - { - description: "Ingress gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindIngressGateway}, - expectedResult: true, - }, - { - description: "Terminating gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindTerminatingGateway}, - expectedResult: true, - }, - { - description: "API Gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindAPIGateway}, - expectedResult: true, - }, - { - description: "Mesh Gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindMeshGateway}, - expectedResult: true, - }, - } - for _, tc := range testsCases { - t.Run(tc.description, func(t *testing.T) { - require.Equal(t, tc.expectedResult, tc.cfgSnapshot.AllowEmptyClusters()) - }) - } -} - -func TestConfigSnapshot_AllowEmptyListeners(t *testing.T) { - type testCase struct { - description string - cfgSnapshot *ConfigSnapshot - expectedResult bool - } - testsCases := []testCase{ - { - description: "Mesh proxies are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindConnectProxy}, - expectedResult: false, - }, - { - description: "Ingress gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindIngressGateway}, - expectedResult: true, - }, - { - description: "Terminating gateways are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindTerminatingGateway}, - expectedResult: false, - }, - { - description: "API Gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindAPIGateway}, - expectedResult: true, - }, - { - description: "Mesh Gateways are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindMeshGateway}, - expectedResult: false, - }, - } - for _, tc := range testsCases { - t.Run(tc.description, func(t *testing.T) { - require.Equal(t, tc.expectedResult, tc.cfgSnapshot.AllowEmptyListeners()) - }) - } -} - -func TestConfigSnapshot_AllowEmptyRoutes(t *testing.T) { - type testCase struct { - description string - cfgSnapshot *ConfigSnapshot - expectedResult bool - } - testsCases := []testCase{ - { - description: "Mesh proxies are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindConnectProxy}, - expectedResult: false, - }, - { - description: "Ingress gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindIngressGateway}, - expectedResult: true, - }, - { - description: "Terminating gateways are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindTerminatingGateway}, - expectedResult: false, - }, - { - description: "API Gateways are allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindAPIGateway}, - expectedResult: true, - }, - { - description: "Mesh Gateways are not allowed", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindMeshGateway}, - expectedResult: false, - }, - } - for _, tc := range testsCases { - t.Run(tc.description, func(t *testing.T) { - 
require.Equal(t, tc.expectedResult, tc.cfgSnapshot.AllowEmptyRoutes()) - }) - } -} - -func TestConfigSnapshot_LoggerName(t *testing.T) { - type testCase struct { - description string - cfgSnapshot *ConfigSnapshot - expectedResult string - } - testsCases := []testCase{ - { - description: "Mesh proxies have a logger named ''", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindConnectProxy}, - expectedResult: "", - }, - { - description: "Ingress gateways have a logger named 'ingress_gateway'", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindIngressGateway}, - expectedResult: "ingress_gateway", - }, - { - description: "Terminating gateways have a logger named 'terminating_gateway'", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindTerminatingGateway}, - expectedResult: "terminating_gateway", - }, - { - description: "API Gateways have a logger named ''", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindAPIGateway}, - expectedResult: "", - }, - { - description: "Mesh Gateways have a logger named 'mesh_gateway'", - cfgSnapshot: &ConfigSnapshot{Kind: structs.ServiceKindMeshGateway}, - expectedResult: "mesh_gateway", - }, - } - for _, tc := range testsCases { - t.Run(tc.description, func(t *testing.T) { - require.Equal(t, tc.expectedResult, tc.cfgSnapshot.LoggerName()) - }) - } -} - -func TestConfigSnapshot_Authorize(t *testing.T) { - type testCase struct { - description string - cfgSnapshot *ConfigSnapshot - configureAuthorizer func(authorizer *acl.MockAuthorizer) - expectedErrorMessage string - } - testsCases := []testCase{ - { - description: "ConnectProxy - if service write is allowed for the DestinationService then allow.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindConnectProxy, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "DestinationServiceName", - }, - }, - expectedErrorMessage: "", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "DestinationServiceName", mock.Anything).Return(acl.Allow) - }, - }, - { - description: "ConnectProxy - if service write is not allowed for the DestinationService then deny.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindConnectProxy, - Proxy: structs.ConnectProxyConfig{ - DestinationServiceName: "DestinationServiceName", - }, - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"DestinationServiceName\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "DestinationServiceName", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "Mesh Gateway - if service write is allowed for the Service then allow.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindMeshGateway, - Service: "Service", - }, - expectedErrorMessage: "", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Allow) - }, - }, - { - description: "Mesh Gateway - if service write is not allowed for the Service then deny.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindMeshGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "Terminating Gateway - if service write is 
allowed for the Service then allow.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindTerminatingGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "Terminating Gateway - if service write is not allowed for the Service then deny.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindTerminatingGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "Ingress Gateway - if service write is allowed for the Service then allow.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindIngressGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "Ingress Gateway - if service write is not allowed for the Service then deny.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindIngressGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "API Gateway - if service write is allowed for the Service then allow.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindAPIGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - { - description: "API Gateway - if service write is not allowed for the Service then deny.", - cfgSnapshot: &ConfigSnapshot{ - Kind: structs.ServiceKindAPIGateway, - Service: "Service", - }, - expectedErrorMessage: "rpc error: code = PermissionDenied desc = Permission denied: token with AccessorID '' lacks permission 'service:write' on \"Service\"", - configureAuthorizer: func(authz *acl.MockAuthorizer) { - authz.On("ServiceWrite", "Service", mock.Anything).Return(acl.Deny) - }, - }, - } - for _, tc := range testsCases { - t.Run(tc.description, func(t *testing.T) { - authz := &acl.MockAuthorizer{} - authz.On("ToAllow").Return(acl.AllowAuthorizer{Authorizer: authz}) - tc.configureAuthorizer(authz) - err := tc.cfgSnapshot.Authorize(authz) - errMsg := "" - if err != nil { - errMsg = err.Error() - } - // using contains because Enterprise tests append the parition and namespace - // information to the message. 
- require.True(t, strings.Contains(errMsg, tc.expectedErrorMessage)) - }) - } -} diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 0a8c1737923ee..7dcbe18e71957 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/data_sources.go b/agent/proxycfg/data_sources.go index dfb9a70f357b8..ee779dfb6c884 100644 --- a/agent/proxycfg/data_sources.go +++ b/agent/proxycfg/data_sources.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/data_sources_ce.go b/agent/proxycfg/data_sources_ce.go index fce4830d84cea..5a92e9486b0dd 100644 --- a/agent/proxycfg/data_sources_ce.go +++ b/agent/proxycfg/data_sources_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/deep-copy.sh b/agent/proxycfg/deep-copy.sh index 2e1f361dd7101..17791e79b1194 100755 --- a/agent/proxycfg/deep-copy.sh +++ b/agent/proxycfg/deep-copy.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 readonly PACKAGE_DIR="$(dirname "${BASH_SOURCE[0]}")" diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 3ab5828add40a..efb774c9b17c3 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/internal/watch/watchmap.go b/agent/proxycfg/internal/watch/watchmap.go index d4fba2ea03eb8..c36ec3237cc66 100644 --- a/agent/proxycfg/internal/watch/watchmap.go +++ b/agent/proxycfg/internal/watch/watchmap.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package watch diff --git a/agent/proxycfg/internal/watch/watchmap_test.go b/agent/proxycfg/internal/watch/watchmap_test.go index 54fb51d4df9ba..c5bef8e471088 100644 --- a/agent/proxycfg/internal/watch/watchmap_test.go +++ b/agent/proxycfg/internal/watch/watchmap_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package watch diff --git a/agent/proxycfg/manager.go b/agent/proxycfg/manager.go index b01787f2c1d12..a942fd1d1e147 100644 --- a/agent/proxycfg/manager.go +++ b/agent/proxycfg/manager.go @@ -1,11 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg import ( "errors" - "github.com/hashicorp/consul/lib/channels" "runtime/debug" "sync" @@ -13,7 +12,6 @@ import ( "golang.org/x/time/rate" "github.com/hashicorp/consul/agent/structs" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" "github.com/hashicorp/consul/tlsutil" ) @@ -38,6 +36,10 @@ type ProxyID struct { // from overwriting each other's registrations. type ProxySource string +// CancelFunc is a type for a returned function that can be called to cancel a +// watch. 
+type CancelFunc func() + // Manager provides an API with which proxy services can be registered, and // coordinates the fetching (and refreshing) of intentions, upstreams, discovery // chain, certificates etc. @@ -53,7 +55,7 @@ type Manager struct { mu sync.Mutex proxies map[ProxyID]*state - watchers map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot + watchers map[ProxyID]map[uint64]chan *ConfigSnapshot maxWatchID uint64 } @@ -104,7 +106,7 @@ func NewManager(cfg ManagerConfig) (*Manager, error) { m := &Manager{ ManagerConfig: cfg, proxies: make(map[ProxyID]*state), - watchers: make(map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot), + watchers: make(map[ProxyID]map[uint64]chan *ConfigSnapshot), rateLimiter: rate.NewLimiter(cfg.UpdateRateLimit, 1), } return m, nil @@ -260,31 +262,53 @@ func (m *Manager) notify(snap *ConfigSnapshot) { // it will drain the chan and then re-attempt delivery so that a slow consumer // gets the latest config earlier. This MUST be called from a method where m.mu // is held to be safe since it assumes we are the only goroutine sending on ch. -func (m *Manager) deliverLatest(snap proxysnapshot.ProxySnapshot, ch chan proxysnapshot.ProxySnapshot) { - m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.(*ConfigSnapshot).ProxyID) - err := channels.DeliverLatest(snap, ch) - if err != nil { - m.Logger.Error("failed to deliver proxyState to proxy", - "proxy", snap.(*ConfigSnapshot).ProxyID, - ) +func (m *Manager) deliverLatest(snap *ConfigSnapshot, ch chan *ConfigSnapshot) { + // Send if chan is empty + select { + case ch <- snap: + return + default: } + // Not empty, drain the chan of older snapshots and redeliver. For now we only + // use 1-buffered chans but this will still work if we change that later. +OUTER: + for { + select { + case <-ch: + continue + default: + break OUTER + } + } + + // Now send again + select { + case ch <- snap: + return + default: + // This should not be possible since we should be the only sender, enforced + // by m.mu but error and drop the update rather than panic. + m.Logger.Error("failed to deliver ConfigSnapshot to proxy", + "proxy", snap.ProxyID.String(), + ) + } } // Watch registers a watch on a proxy. It might not exist yet in which case this // will not fail, but no updates will be delivered until the proxy is // registered. If there is already a valid snapshot in memory, it will be // delivered immediately. -func (m *Manager) Watch(id ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { +func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) { m.mu.Lock() defer m.mu.Unlock() // This buffering is crucial otherwise we'd block immediately trying to // deliver the current snapshot below if we already have one. - ch := make(chan proxysnapshot.ProxySnapshot, 1) + ch := make(chan *ConfigSnapshot, 1) watchers, ok := m.watchers[id] if !ok { - watchers = make(map[uint64]chan proxysnapshot.ProxySnapshot) + watchers = make(map[uint64]chan *ConfigSnapshot) } watchID := m.maxWatchID m.maxWatchID++ diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index 7c83b5c770d26..0595b1e8273d1 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/consul/agent/proxycfg/internal/watch" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/consul/sdk/testutil" ) @@ -471,7 +470,7 @@ func testManager_BasicLifecycle( require.Len(t, m.watchers, 0) } -func assertWatchChanBlocks(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot) { +func assertWatchChanBlocks(t *testing.T, ch <-chan *ConfigSnapshot) { t.Helper() select { @@ -481,7 +480,7 @@ func assertWatchChanBlocks(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot) } } -func assertWatchChanRecvs(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot, expect proxysnapshot.ProxySnapshot) { +func assertWatchChanRecvs(t *testing.T, ch <-chan *ConfigSnapshot, expect *ConfigSnapshot) { t.Helper() select { @@ -519,7 +518,7 @@ func TestManager_deliverLatest(t *testing.T) { } // test 1 buffered chan - ch1 := make(chan proxysnapshot.ProxySnapshot, 1) + ch1 := make(chan *ConfigSnapshot, 1) // Sending to an unblocked chan should work m.deliverLatest(snap1, ch1) @@ -535,7 +534,7 @@ func TestManager_deliverLatest(t *testing.T) { require.Equal(t, snap2, <-ch1) // Same again for 5-buffered chan - ch5 := make(chan proxysnapshot.ProxySnapshot, 5) + ch5 := make(chan *ConfigSnapshot, 5) // Sending to an unblocked chan should work m.deliverLatest(snap1, ch5) diff --git a/agent/proxycfg/mesh_gateway.go b/agent/proxycfg/mesh_gateway.go index c9fe60a892894..3c74a6e559e8e 100644 --- a/agent/proxycfg/mesh_gateway.go +++ b/agent/proxycfg/mesh_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/acl" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/proxycfg/internal/watch" diff --git a/agent/proxycfg/mesh_gateway_ce.go b/agent/proxycfg/mesh_gateway_ce.go index 1c1156232d71c..2959a8383a1e8 100644 --- a/agent/proxycfg/mesh_gateway_ce.go +++ b/agent/proxycfg/mesh_gateway_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/naming.go b/agent/proxycfg/naming.go index a9bd5fd8c0ca4..07aa42f2f4dd4 100644 --- a/agent/proxycfg/naming.go +++ b/agent/proxycfg/naming.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/naming_ce.go b/agent/proxycfg/naming_ce.go index 7a375b6df944e..858b8d3553dfd 100644 --- a/agent/proxycfg/naming_ce.go +++ b/agent/proxycfg/naming_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/naming_test.go b/agent/proxycfg/naming_test.go index 0615a81281824..caf917f5d9757 100644 --- a/agent/proxycfg/naming_test.go +++ b/agent/proxycfg/naming_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/proxycfg.deepcopy.go b/agent/proxycfg/proxycfg.deepcopy.go index 68a5bd0db5fc6..a63d2a48adc7f 100644 --- a/agent/proxycfg/proxycfg.deepcopy.go +++ b/agent/proxycfg/proxycfg.deepcopy.go @@ -5,7 +5,7 @@ package proxycfg import ( "context" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/agent/xds/config" + xds "github.com/hashicorp/consul/agent/xds/config" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/consul/types" "time" @@ -14,10 +14,6 @@ import ( // DeepCopy generates a deep copy of *ConfigSnapshot func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { var cp ConfigSnapshot = *o - if o.ServiceLocality != nil { - cp.ServiceLocality = new(structs.Locality) - *cp.ServiceLocality = *o.ServiceLocality - } if o.ServiceMeta != nil { cp.ServiceMeta = make(map[string]string, len(o.ServiceMeta)) for k2, v2 := range o.ServiceMeta { @@ -149,7 +145,7 @@ func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { cp.APIGateway = *retV } if o.computedFields.xdsCommonConfig != nil { - cp.computedFields.xdsCommonConfig = new(config.XDSCommonConfig) + cp.computedFields.xdsCommonConfig = new(xds.XDSCommonConfig) *cp.computedFields.xdsCommonConfig = *o.computedFields.xdsCommonConfig if o.computedFields.xdsCommonConfig.XDSFetchTimeoutMs != nil { cp.computedFields.xdsCommonConfig.XDSFetchTimeoutMs = new(int) @@ -157,7 +153,7 @@ func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { } } if o.computedFields.proxyConfig != nil { - cp.computedFields.proxyConfig = new(config.ProxyConfig) + cp.computedFields.proxyConfig = new(xds.ProxyConfig) *cp.computedFields.proxyConfig = *o.computedFields.proxyConfig if o.computedFields.proxyConfig.LocalRequestTimeoutMs != nil { cp.computedFields.proxyConfig.LocalRequestTimeoutMs = new(int) @@ -169,7 +165,7 @@ func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { } } if o.computedFields.gatewayConfig != nil { - cp.computedFields.gatewayConfig = new(config.GatewayConfig) + cp.computedFields.gatewayConfig = new(xds.GatewayConfig) *cp.computedFields.gatewayConfig = *o.computedFields.gatewayConfig if o.computedFields.gatewayConfig.BindAddresses != nil { cp.computedFields.gatewayConfig.BindAddresses = make(map[string]structs.ServiceAddress, len(o.computedFields.gatewayConfig.BindAddresses)) diff --git a/agent/proxycfg/proxycfg.go b/agent/proxycfg/proxycfg.go index 9b71156dfe5aa..f73e6ac726ceb 100644 --- a/agent/proxycfg/proxycfg.go +++ b/agent/proxycfg/proxycfg.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package proxycfg contains components for sourcing the data required to // configure Connect proxies. The Manager provides an API with which proxy @@ -45,7 +45,7 @@ // ConfigSource - on a client agent this would be a local config source, on a // server it would be a catalog config source. // 4. On server, the catalog config source will check if service is registered locally. -// 4a. If the service *is* registered locally it hands off the local config +// 4a. If the service *is* registered locally it hands off the the local config // source, which calls Watch on the proxycfg manager (and serves the pre- // fetched data). // 5. Otherwise, it fetches the service from the state store. 
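The Manager.deliverLatest change earlier in this patch re-inlines the drain-and-redeliver logic instead of delegating to lib/channels. The standalone sketch below is a minimal illustration of that pattern, assuming a single sender (which the real Manager guarantees by holding m.mu) and a simplified snapshot type in place of *ConfigSnapshot; only the channel handling mirrors the patch.

package main

import "fmt"

type snapshot struct{ port int }

// deliverLatest sends snap on ch, dropping any stale snapshot already
// buffered so a slow consumer always reads the newest state.
func deliverLatest(snap *snapshot, ch chan *snapshot) {
	// Fast path: the buffer is empty, send immediately.
	select {
	case ch <- snap:
		return
	default:
	}

	// Not empty: drain older snapshots. Works for any buffer size.
OUTER:
	for {
		select {
		case <-ch:
			continue
		default:
			break OUTER
		}
	}

	// Now send again.
	select {
	case ch <- snap:
	default:
		// Unreachable while we are the only sender; drop rather than panic.
		fmt.Println("failed to deliver snapshot")
	}
}

func main() {
	ch := make(chan *snapshot, 1)
	deliverLatest(&snapshot{port: 9999}, ch)
	deliverLatest(&snapshot{port: 8888}, ch) // replaces the stale 9999
	fmt.Println((<-ch).port)                 // prints 8888
}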
diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 8f407c1afcd57..522a7dafa0df7 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/proxycfg/internal/watch" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/agent/xds/config" + config "github.com/hashicorp/consul/agent/xds/config" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/go-hclog" @@ -936,7 +936,6 @@ func IngressListenerKeyFromListener(l structs.IngressListener) IngressListenerKe type ConfigSnapshot struct { Kind structs.ServiceKind Service string - ServiceLocality *structs.Locality ProxyID ProxyID Address string Port int diff --git a/agent/proxycfg/snapshot_test.go b/agent/proxycfg/snapshot_test.go index 4978316c0b98c..1959c0606e4da 100644 --- a/agent/proxycfg/snapshot_test.go +++ b/agent/proxycfg/snapshot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index a113903c150cb..57d08c2ecb635 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -126,7 +126,6 @@ type serviceInstance struct { taggedAddresses map[string]structs.ServiceAddress proxyCfg structs.ConnectProxyConfig token string - locality *structs.Locality } func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) { @@ -247,7 +246,6 @@ func newServiceInstanceFromNodeService(id ProxyID, ns *structs.NodeService, toke return serviceInstance{ kind: ns.Kind, service: ns.Service, - locality: ns.Locality, proxyID: id, address: ns.Address, port: ns.Port, @@ -307,7 +305,6 @@ func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig) return ConfigSnapshot{ Kind: s.kind, Service: s.service, - ServiceLocality: s.locality, ProxyID: s.proxyID, Address: s.address, Port: s.port, diff --git a/agent/proxycfg/state_ce_test.go b/agent/proxycfg/state_ce_test.go index 61ba1fcc3a536..e817aeef0b13f 100644 --- a/agent/proxycfg/state_ce_test.go +++ b/agent/proxycfg/state_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 43743eb40a032..f6903cbe52f92 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/terminating_gateway.go b/agent/proxycfg/terminating_gateway.go index a465808390ade..4e2f172afb2bb 100644 --- a/agent/proxycfg/terminating_gateway.go +++ b/agent/proxycfg/terminating_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index bdd565ec0454e..ac68994cb8f18 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -167,7 +167,7 @@ func TestUpstreamNodes(t testing.T, service string) structs.CheckServiceNodes { Datacenter: "dc1", Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, - Service: structs.TestNodeServiceWithName(service), + Service: structs.TestNodeServiceWithName(t, service), }, structs.CheckServiceNode{ Node: &structs.Node{ @@ -177,47 +177,7 @@ func TestUpstreamNodes(t testing.T, service string) structs.CheckServiceNodes { Datacenter: "dc1", Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, - Service: structs.TestNodeServiceWithName(service), - }, - } -} - -// TestUpstreamNodesWithServiceSubset returns a sample service discovery result with one instance tagged v1 -// and the other tagged v2 -func TestUpstreamNodesWithServiceSubset(t testing.T, service string) structs.CheckServiceNodes { - return structs.CheckServiceNodes{ - structs.CheckServiceNode{ - Node: &structs.Node{ - ID: "test1", - Node: "test1", - Address: "10.10.1.3", - Datacenter: "dc1", - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), - }, - Service: &structs.NodeService{ - Kind: structs.ServiceKindTypical, - Service: service, - Port: 8080, - Meta: map[string]string{"Version": "1"}, - Weights: &structs.Weights{ - Passing: 300, // Check that this gets normalized to 128 - }, - }, - }, - structs.CheckServiceNode{ - Node: &structs.Node{ - ID: "test2", - Node: "test2", - Address: "10.10.1.4", - Datacenter: "dc1", - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), - }, - Service: &structs.NodeService{ - Kind: structs.ServiceKindTypical, - Service: service, - Port: 8080, - Meta: map[string]string{"Version": "2"}, - }, + Service: structs.TestNodeServiceWithName(t, service), }, } } @@ -271,7 +231,7 @@ func TestUpstreamNodesInStatus(t testing.T, status string) structs.CheckServiceN Address: "10.10.1.1", Datacenter: "dc1", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), Checks: structs.HealthChecks{ &structs.HealthCheck{ Node: "test1", @@ -288,7 +248,7 @@ func TestUpstreamNodesInStatus(t testing.T, status string) structs.CheckServiceN Address: "10.10.1.2", Datacenter: "dc1", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), Checks: structs.HealthChecks{ &structs.HealthCheck{ Node: "test2", @@ -310,7 +270,7 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes { Address: "10.20.1.1", Datacenter: "dc2", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), }, structs.CheckServiceNode{ Node: &structs.Node{ @@ -319,7 +279,7 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes { Address: "10.20.1.2", Datacenter: "dc2", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), }, } } @@ -333,7 +293,7 @@ func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServi Address: "10.20.1.1", Datacenter: "dc2", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), Checks: structs.HealthChecks{ &structs.HealthCheck{ Node: "test1", @@ -350,7 +310,7 @@ 
func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServi Address: "10.20.1.2", Datacenter: "dc2", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), Checks: structs.HealthChecks{ &structs.HealthCheck{ Node: "test2", @@ -372,7 +332,7 @@ func TestUpstreamNodesAlternate(t testing.T) structs.CheckServiceNodes { Address: "10.20.1.1", Datacenter: "dc1", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), }, structs.CheckServiceNode{ Node: &structs.Node{ @@ -381,7 +341,7 @@ func TestUpstreamNodesAlternate(t testing.T) structs.CheckServiceNodes { Address: "10.20.1.2", Datacenter: "dc1", }, - Service: structs.TestNodeService(), + Service: structs.TestNodeService(t), }, } } diff --git a/agent/proxycfg/testing_api_gateway.go b/agent/proxycfg/testing_api_gateway.go index ddfa17dcf6ab2..87ff58fbf0535 100644 --- a/agent/proxycfg/testing_api_gateway.go +++ b/agent/proxycfg/testing_api_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -8,9 +8,10 @@ import ( "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/discoverychain" + + "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" ) diff --git a/agent/proxycfg/testing_ce.go b/agent/proxycfg/testing_ce.go index 97c23d82eb3dd..202252a3a7335 100644 --- a/agent/proxycfg/testing_ce.go +++ b/agent/proxycfg/testing_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/testing_connect_proxy.go b/agent/proxycfg/testing_connect_proxy.go index cf6f4a479b1eb..a929aa52f167d 100644 --- a/agent/proxycfg/testing_connect_proxy.go +++ b/agent/proxycfg/testing_connect_proxy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -227,14 +227,6 @@ func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeServi } func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot { - return testConfigSnapshotExposedChecks(t, false) -} - -func TestConfigSnapshotExposeChecksWithBindOverride(t testing.T) *ConfigSnapshot { - return testConfigSnapshotExposedChecks(t, true) -} - -func testConfigSnapshotExposedChecks(t testing.T, overrideBind bool) *ConfigSnapshot { return TestConfigSnapshot(t, func(ns *structs.NodeService) { ns.Address = "1.2.3.4" @@ -243,12 +235,6 @@ func testConfigSnapshotExposedChecks(t testing.T, overrideBind bool) *ConfigSnap ns.Proxy.Expose = structs.ExposeConfig{ Checks: true, } - if overrideBind { - if ns.Proxy.Config == nil { - ns.Proxy.Config = map[string]any{} - } - ns.Proxy.Config["bind_address"] = "6.7.8.9" - } }, []UpdateEvent{ { @@ -267,32 +253,6 @@ func testConfigSnapshotExposedChecks(t testing.T, overrideBind bool) *ConfigSnap ) } -func TestConfigSnapshotExposeChecksGRPC(t testing.T) *ConfigSnapshot { - return TestConfigSnapshot(t, - func(ns *structs.NodeService) { - ns.Address = "1.2.3.4" - ns.Port = 9090 - ns.Proxy.Upstreams = nil - ns.Proxy.Expose = structs.ExposeConfig{ - Checks: true, - } - }, - []UpdateEvent{ - { - CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("web", nil), - Result: []structs.CheckType{{ - CheckID: types.CheckID("grpc"), - Name: "grpc", - GRPC: "localhost:9090/v1.Health", - ProxyGRPC: "localhost:21501/myservice", - Interval: 10 * time.Second, - Timeout: 1 * time.Second, - }}, - }, - }, - ) -} - func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot { roots, leaf := TestCerts(t) diff --git a/agent/proxycfg/testing_ingress_gateway.go b/agent/proxycfg/testing_ingress_gateway.go index f178955c18ab4..87a3313ecdb50 100644 --- a/agent/proxycfg/testing_ingress_gateway.go +++ b/agent/proxycfg/testing_ingress_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -1364,7 +1364,7 @@ func TestConfigSnapshotIngressGateway_TLSMinVersionListenersGatewayDefaults(t te Port: 8083, Protocol: "http", Services: []structs.IngressService{ - {Name: "s4"}, + {Name: "s3"}, }, TLS: &structs.GatewayTLSConfig{ Enabled: true, @@ -1888,8 +1888,8 @@ func TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * entry.TLS.Enabled = true entry.TLS.TLSMinVersion = types.TLSv1_2 - // One listener should inherit TLS minimum version from the gateway config, - // two others each set explicit TLS minimum versions + // One listener disables TLS, one inherits TLS minimum version from the gateway + // config, two others set different versions entry.Listeners = []structs.IngressListener{ { Port: 8080, @@ -1925,6 +1925,8 @@ func TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * { CorrelationID: gatewayServicesWatchID, Result: &structs.IndexedGatewayServices{ + // One listener should inherit TLS minimum version from the gateway config, + // two others each set explicit TLS minimum versions Services: []*structs.GatewayService{ { Service: s1, @@ -1982,208 +1984,3 @@ func TestConfigSnapshotIngressGateway_TLSMixedMinVersionListeners(t testing.T) * }, }) } - -func TestConfigSnapshotIngressGateway_TLSMixedMaxVersionListeners(t testing.T) *ConfigSnapshot { - var ( - s1 = structs.NewServiceName("s1", nil) - s1UID = NewUpstreamIDFromServiceName(s1) - s1Chain = discoverychain.TestCompileConfigEntries(t, "s1", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) - - s2 = structs.NewServiceName("s2", nil) - s2UID = NewUpstreamIDFromServiceName(s2) - s2Chain = discoverychain.TestCompileConfigEntries(t, "s2", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) - - s3 = structs.NewServiceName("s3", nil) - s3UID = NewUpstreamIDFromServiceName(s3) - s3Chain = discoverychain.TestCompileConfigEntries(t, "s3", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) - ) - - return TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, - func(entry *structs.IngressGatewayConfigEntry) { - entry.TLS.Enabled = true - entry.TLS.TLSMaxVersion = types.TLSv1_2 - - // One listener should inherit TLS maximum version from the gateway config, - // two others each set explicit TLS maximum versions - entry.Listeners = []structs.IngressListener{ - { - Port: 8080, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "s1"}, - }, - }, - { - Port: 8081, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "s2"}, - }, - TLS: &structs.GatewayTLSConfig{ - Enabled: true, - TLSMaxVersion: types.TLSv1_0, - }, - }, - { - Port: 8082, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "s3"}, - }, - TLS: &structs.GatewayTLSConfig{ - Enabled: true, - TLSMaxVersion: types.TLSv1_3, - }, - }, - } - }, []UpdateEvent{ - { - CorrelationID: gatewayServicesWatchID, - Result: &structs.IndexedGatewayServices{ - Services: []*structs.GatewayService{ - { - Service: s1, - Port: 8080, - Protocol: "http", - }, - { - Service: s2, - Port: 8081, - Protocol: "http", - }, - { - Service: s3, - Port: 8082, - Protocol: "http", - }, - }, - }, - }, - { - CorrelationID: "discovery-chain:" + s1UID.String(), - Result: &structs.DiscoveryChainResponse{ - Chain: s1Chain, - }, - }, - { - CorrelationID: "discovery-chain:" + s2UID.String(), - Result: &structs.DiscoveryChainResponse{ - Chain: s2Chain, - }, - }, - { - 
CorrelationID: "discovery-chain:" + s3UID.String(), - Result: &structs.DiscoveryChainResponse{ - Chain: s3Chain, - }, - }, - { - CorrelationID: "upstream-target:" + s1Chain.ID() + ":" + s1UID.String(), - Result: &structs.IndexedCheckServiceNodes{ - Nodes: TestUpstreamNodes(t, "s1"), - }, - }, - { - CorrelationID: "upstream-target:" + s2Chain.ID() + ":" + s2UID.String(), - Result: &structs.IndexedCheckServiceNodes{ - Nodes: TestUpstreamNodes(t, "s2"), - }, - }, - { - CorrelationID: "upstream-target:" + s3Chain.ID() + ":" + s3UID.String(), - Result: &structs.IndexedCheckServiceNodes{ - Nodes: TestUpstreamNodes(t, "s3"), - }, - }, - }) -} - -func TestConfigSnapshotIngressGateway_TLSMixedCipherVersionListeners(t testing.T) *ConfigSnapshot { - var ( - s1 = structs.NewServiceName("s1", nil) - s1UID = NewUpstreamIDFromServiceName(s1) - s1Chain = discoverychain.TestCompileConfigEntries(t, "s1", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) - - s2 = structs.NewServiceName("s2", nil) - s2UID = NewUpstreamIDFromServiceName(s2) - s2Chain = discoverychain.TestCompileConfigEntries(t, "s2", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil) - ) - - return TestConfigSnapshotIngressGateway(t, true, "tcp", "default", nil, - func(entry *structs.IngressGatewayConfigEntry) { - entry.TLS.Enabled = true - entry.TLS.CipherSuites = []types.TLSCipherSuite{ - types.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - } - - // One listener should inherit TLS Ciphers from the gateway config, - // the other should be set explicitly from the listener config - entry.Listeners = []structs.IngressListener{ - { - Port: 8080, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "s1"}, - }, - }, - { - Port: 8081, - Protocol: "http", - Services: []structs.IngressService{ - {Name: "s2"}, - }, - TLS: &structs.GatewayTLSConfig{ - Enabled: true, - CipherSuites: []types.TLSCipherSuite{ - types.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - }, - }, - }, - } - }, []UpdateEvent{ - { - CorrelationID: gatewayServicesWatchID, - Result: &structs.IndexedGatewayServices{ - // One listener should inherit TLS minimum version from the gateway config, - // two others each set explicit TLS minimum versions - Services: []*structs.GatewayService{ - { - Service: s1, - Port: 8080, - Protocol: "http", - }, - { - Service: s2, - Port: 8081, - Protocol: "http", - }, - }, - }, - }, - { - CorrelationID: "discovery-chain:" + s1UID.String(), - Result: &structs.DiscoveryChainResponse{ - Chain: s1Chain, - }, - }, - { - CorrelationID: "discovery-chain:" + s2UID.String(), - Result: &structs.DiscoveryChainResponse{ - Chain: s2Chain, - }, - }, - { - CorrelationID: "upstream-target:" + s1Chain.ID() + ":" + s1UID.String(), - Result: &structs.IndexedCheckServiceNodes{ - Nodes: TestUpstreamNodes(t, "s1"), - }, - }, - { - CorrelationID: "upstream-target:" + s2Chain.ID() + ":" + s2UID.String(), - Result: &structs.IndexedCheckServiceNodes{ - Nodes: TestUpstreamNodes(t, "s2"), - }, - }, - }) -} diff --git a/agent/proxycfg/testing_mesh_gateway.go b/agent/proxycfg/testing_mesh_gateway.go index 72a3b737309b2..abc8825484550 100644 --- a/agent/proxycfg/testing_mesh_gateway.go +++ b/agent/proxycfg/testing_mesh_gateway.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -23,10 +23,9 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st roots, _ := TestCertsForMeshGateway(t) var ( - populateServices = true - useFederationStates = false - deleteCrossDCEntry = false - meshGatewayFederation = false + populateServices = true + useFederationStates = false + deleteCrossDCEntry = false ) switch variant { @@ -35,11 +34,6 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st populateServices = true useFederationStates = true deleteCrossDCEntry = true - case "mesh-gateway-federation": - populateServices = true - useFederationStates = true - deleteCrossDCEntry = true - meshGatewayFederation = true case "newer-info-in-federation-states": populateServices = true useFederationStates = true @@ -453,63 +447,6 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st }) } - var serverSNIFn ServerSNIFunc - if meshGatewayFederation { - - // reproduced from tlsutil/config.go - serverSNIFn = func(dc, nodeName string) string { - // Strip the trailing '.' from the domain if any - domain := "consul" - - if nodeName == "" || nodeName == "*" { - return "server." + dc + "." + domain - } - - return nodeName + ".server." + dc + "." + domain - } - - baseEvents = testSpliceEvents(baseEvents, []UpdateEvent{ - { - CorrelationID: consulServerListWatchID, - Result: &structs.IndexedCheckServiceNodes{ - Nodes: structs.CheckServiceNodes{ - { - Node: &structs.Node{ - Datacenter: "dc1", - Node: "node1", - Address: "127.0.0.1", - }, - Service: &structs.NodeService{ - ID: structs.ConsulServiceID, - Service: structs.ConsulServiceName, - Meta: map[string]string{ - "grpc_port": "8502", - "grpc_tls_port": "8503", - }, - }, - }, - { - Node: &structs.Node{ - Datacenter: "dc1", - Node: "node2", - Address: "127.0.0.2", - }, - Service: &structs.NodeService{ - ID: structs.ConsulServiceID, - Service: structs.ConsulServiceName, - Meta: map[string]string{ - "grpc_port": "8502", - "grpc_tls_port": "8503", - }, - }, - }, - }, - }, - }, - }) - - } - return testConfigSnapshotFixture(t, &structs.NodeService{ Kind: structs.ServiceKindMeshGateway, Service: "mesh-gateway", @@ -529,7 +466,7 @@ func TestConfigSnapshotMeshGateway(t testing.T, variant string, nsFn func(ns *st Port: 443, }, }, - }, nsFn, serverSNIFn, testSpliceEvents(baseEvents, extraUpdates)) + }, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates)) } func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent) *ConfigSnapshot { @@ -748,73 +685,6 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func( }, }, ) - case "mgw-peered-upstream": - // This is a modified version of "chain-and-l7-stuff" that adds a peer field to the resolver - // and removes some of the extraneous disco-chain testing. 
- entries = []structs.ConfigEntry{ - &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: structs.ProxyConfigGlobal, - Config: map[string]interface{}{ - "protocol": "http", - }, - }, - &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "db", - Redirect: &structs.ServiceResolverRedirect{ - Service: "alt", - Peer: "peer-b", - }, - ConnectTimeout: 33 * time.Second, - RequestTimeout: 33 * time.Second, - }, - } - for _, entry := range entries { - require.NoError(t, entry.Normalize()) - require.NoError(t, entry.Validate()) - } - - set := configentry.NewDiscoveryChainSet() - set.AddEntries(entries...) - - var ( - dbSN = structs.NewServiceName("db", nil) - altSN = structs.NewServiceName("alt", nil) - - dbChain = discoverychain.TestCompileConfigEntries(t, "db", "default", "default", "dc1", connect.TestClusterID+".consul", nil, set) - ) - - needPeerA = true - needLeaf = true - discoChains[dbSN] = dbChain - endpoints[dbSN] = TestUpstreamNodes(t, "db") - endpoints[altSN] = TestUpstreamNodes(t, "alt") - - extraUpdates = append(extraUpdates, - UpdateEvent{ - CorrelationID: datacentersWatchID, - Result: &[]string{"dc1"}, - }, - UpdateEvent{ - CorrelationID: exportedServiceListWatchID, - Result: &structs.IndexedExportedServiceList{ - Services: map[string]structs.ServiceList{ - "peer-a": []structs.ServiceName{dbSN}, - }, - }, - }, - UpdateEvent{ - CorrelationID: serviceListWatchID, - Result: &structs.IndexedServiceList{ - Services: []structs.ServiceName{ - dbSN, - altSN, - }, - }, - }, - ) - case "chain-and-l7-stuff": entries = []structs.ConfigEntry{ &structs.ProxyConfigEntry{ @@ -835,12 +705,8 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func( Kind: structs.ServiceResolver, Name: "api", Subsets: map[string]structs.ServiceResolverSubset{ - "v1": { - Filter: "Service.Meta.Version == 1", - }, "v2": { - Filter: "Service.Meta.Version == 2", - OnlyPassing: true, + Filter: "Service.Meta.version == v2", }, }, }, @@ -890,7 +756,6 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func( var ( dbSN = structs.NewServiceName("db", nil) altSN = structs.NewServiceName("alt", nil) - apiSN = structs.NewServiceName("api", nil) dbChain = discoverychain.TestCompileConfigEntries(t, "db", "default", "default", "dc1", connect.TestClusterID+".consul", nil, set) ) @@ -900,7 +765,6 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func( discoChains[dbSN] = dbChain endpoints[dbSN] = TestUpstreamNodes(t, "db") endpoints[altSN] = TestUpstreamNodes(t, "alt") - endpoints[apiSN] = TestUpstreamNodesWithServiceSubset(t, "api") extraUpdates = append(extraUpdates, UpdateEvent{ @@ -924,29 +788,7 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func( }, }, }, - UpdateEvent{ - CorrelationID: serviceResolversWatchID, - Result: &structs.IndexedConfigEntries{ - Kind: structs.ServiceResolver, - Entries: []structs.ConfigEntry{ - &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "api", - Subsets: map[string]structs.ServiceResolverSubset{ - "v1": { - Filter: "Service.Meta.Version == 1", - }, - "v2": { - Filter: "Service.Meta.Version == 2", - OnlyPassing: true, - }, - }, - }, - }, - }, - }, ) - case "peer-through-mesh-gateway": extraUpdates = append(extraUpdates, diff --git a/agent/proxycfg/testing_peering.go b/agent/proxycfg/testing_peering.go index ee5e7eaceba40..6cc493fe92f22 100644 --- a/agent/proxycfg/testing_peering.go +++ b/agent/proxycfg/testing_peering.go @@ 
-1,32 +1,20 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg import ( - "bytes" - "text/template" - "github.com/mitchellh/go-testing-interface" - "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto/private/pbpeering" ) func TestConfigSnapshotPeering(t testing.T) *ConfigSnapshot { - return testConfigSnapshot(t, false, false, nil) -} - -func TestConfigSnapshotPeeringWithEscapeOverrides(t testing.T) *ConfigSnapshot { - return testConfigSnapshot(t, true, false, nil) -} - -func TestConfigSnapshotPeeringWithHTTP2(t testing.T, nsFn func(ns *structs.NodeService)) *ConfigSnapshot { - return testConfigSnapshot(t, false, true, nsFn) + return testConfigSnapshot(t, nil) } -func testConfigSnapshot(t testing.T, escapeOverride bool, useHTTP2 bool, nsFn func(ns *structs.NodeService)) *ConfigSnapshot { +func testConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService)) *ConfigSnapshot { var ( paymentsUpstream = structs.Upstream{ DestinationName: "payments", @@ -43,11 +31,6 @@ func testConfigSnapshot(t testing.T, escapeOverride bool, useHTTP2 bool, nsFn fu refundsUID = NewUpstreamID(&refundsUpstream) ) - protocol := "tcp" - if useHTTP2 { - protocol = "http2" - } - const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul" return TestConfigSnapshot(t, func(ns *structs.NodeService) { @@ -55,23 +38,6 @@ func testConfigSnapshot(t testing.T, escapeOverride bool, useHTTP2 bool, nsFn fu paymentsUpstream, refundsUpstream, } - - if escapeOverride { - if ns.Proxy.Upstreams[0].Config == nil { - ns.Proxy.Upstreams[0].Config = map[string]interface{}{} - } - - uid := NewUpstreamID(&ns.Proxy.Upstreams[0]) - - ns.Proxy.Upstreams[0].Config["envoy_listener_json"] = - customListenerJSON(t, customListenerJSONOptions{ - Name: uid.EnvoyID() + ":custom-upstream", - }) - ns.Proxy.Upstreams[0].Config["envoy_cluster_json"] = - customClusterJSON(t, customClusterJSONOptions{ - Name: uid.EnvoyID() + ":custom-upstream", - }) - } if nsFn != nil { nsFn(ns) } @@ -113,7 +79,7 @@ func testConfigSnapshot(t testing.T, escapeOverride bool, useHTTP2 bool, nsFn fu SpiffeID: []string{ "spiffe://" + peerTrustDomain + "/ns/default/dc/cloud-dc/svc/payments", }, - Protocol: protocol, + Protocol: "tcp", }, }, }, @@ -142,7 +108,7 @@ func testConfigSnapshot(t testing.T, escapeOverride bool, useHTTP2 bool, nsFn fu SpiffeID: []string{ "spiffe://" + peerTrustDomain + "/ns/default/dc/cloud-dc/svc/refunds", }, - Protocol: protocol, + Protocol: "tcp", }, }, }, @@ -421,93 +387,3 @@ func TestConfigSnapshotPeeringLocalMeshGateway(t testing.T) *ConfigSnapshot { }, }) } - -var ( - customListenerJSONTemplate = template.Must(template.New("").Parse(customListenerJSONTpl)) -) - -func customListenerJSON(t testing.T, opts customListenerJSONOptions) string { - t.Helper() - var buf bytes.Buffer - require.NoError(t, customListenerJSONTemplate.Execute(&buf, opts)) - return buf.String() -} - -type customListenerJSONOptions struct { - Name string - TLSContext string -} - -const customListenerJSONTpl = `{ - "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", - "name": "{{ .Name }}", - "address": { - "socketAddress": { - "address": "11.11.11.11", - "portValue": 11111 - } - }, - "filterChains": [ - { - {{ if .TLSContext -}} - "transport_socket": { - "name": "tls", - "typed_config": { - "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", - {{ .TLSContext }} - } - }, - 
{{- end }} - "filters": [ - { - "name": "envoy.filters.network.tcp_proxy", - "typedConfig": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", - "cluster": "random-cluster", - "statPrefix": "foo-stats" - } - } - ] - } - ] -}` - -type customClusterJSONOptions struct { - Name string - TLSContext string -} - -var customClusterJSONTpl = `{ - "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", - "name": "{{ .Name }}", - "connectTimeout": "15s", - "loadAssignment": { - "clusterName": "{{ .Name }}", - "endpoints": [ - { - "lbEndpoints": [ - { - "endpoint": { - "address": { - "socketAddress": { - "address": "1.2.3.4", - "portValue": 8443 - } - } - } - } - ] - } - ] - } -}` - -var customClusterJSONTemplate = template.Must(template.New("").Parse(customClusterJSONTpl)) - -func customClusterJSON(t testing.T, opts customClusterJSONOptions) string { - t.Helper() - var buf bytes.Buffer - err := customClusterJSONTemplate.Execute(&buf, opts) - require.NoError(t, err) - return buf.String() -} diff --git a/agent/proxycfg/testing_terminating_gateway.go b/agent/proxycfg/testing_terminating_gateway.go index 4b4f086e6e06e..1d7b97d79befb 100644 --- a/agent/proxycfg/testing_terminating_gateway.go +++ b/agent/proxycfg/testing_terminating_gateway.go @@ -1,11 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg import ( - "time" - "github.com/mitchellh/go-testing-interface" "github.com/hashicorp/consul/agent/structs" @@ -650,7 +648,6 @@ func testConfigSnapshotTerminatingGatewayLBConfig(t testing.T, variant string) * OnlyPassing: true, }, }, - RequestTimeout: 200 * time.Millisecond, LoadBalancer: &structs.LoadBalancer{ Policy: "ring_hash", RingHashConfig: &structs.RingHashConfig{ diff --git a/agent/proxycfg/testing_tproxy.go b/agent/proxycfg/testing_tproxy.go index 7cb9455177e6c..5a99965af371c 100644 --- a/agent/proxycfg/testing_tproxy.go +++ b/agent/proxycfg/testing_tproxy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg/testing_upstreams.go b/agent/proxycfg/testing_upstreams.go index 819d4f63d9baf..3915c0db007c9 100644 --- a/agent/proxycfg/testing_upstreams.go +++ b/agent/proxycfg/testing_upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg @@ -256,9 +256,6 @@ func setupTestVariationConfigEntriesAndSnapshot( case "chain-and-router": case "lb-resolver": case "register-to-terminating-gateway": - case "redirect-to-lb-node": - case "resolver-with-lb": - case "splitter-overweight": default: extraEvents := extraUpdateEvents(t, variation, dbUID) events = append(events, extraEvents...) @@ -530,106 +527,6 @@ func setupTestVariationDiscoveryChain( }, ) case "chain-and-splitter": - entries = append(entries, - &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "db", - EnterpriseMeta: entMeta, - ConnectTimeout: 25 * time.Second, - }, - &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: structs.ProxyConfigGlobal, - EnterpriseMeta: entMeta, - Protocol: "http", - Config: map[string]interface{}{ - "protocol": "http", - }, - }, - // Adding a ServiceRouter in this case allows testing ServiceRoute.Destination timeouts. 
- &structs.ServiceRouterConfigEntry{ - Kind: structs.ServiceRouter, - Name: "db", - EnterpriseMeta: entMeta, - Routes: []structs.ServiceRoute{ - { - Match: &structs.ServiceRouteMatch{ - HTTP: &structs.ServiceRouteHTTPMatch{ - PathPrefix: "/big-side", - }, - }, - Destination: &structs.ServiceRouteDestination{ - Service: "big-side", - // Test disabling idle timeout. - IdleTimeout: -1 * time.Second, - // Test a positive value for request timeout. - RequestTimeout: 10 * time.Second, - }, - }, - { - Match: &structs.ServiceRouteMatch{ - HTTP: &structs.ServiceRouteHTTPMatch{ - PathPrefix: "/lil-bit-side", - }, - }, - Destination: &structs.ServiceRouteDestination{ - Service: "lil-bit-side", - // Test zero values for these timeouts. - IdleTimeout: 0 * time.Second, - RequestTimeout: 0 * time.Second, - }, - }, - }, - }, - &structs.ServiceSplitterConfigEntry{ - Kind: structs.ServiceSplitter, - Name: "db", - EnterpriseMeta: entMeta, - Splits: []structs.ServiceSplit{ - { - Weight: 1, - Service: "db", - RequestHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "db"}, - }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "db"}, - }, - }, - { - Weight: 95.5, - Service: "big-side", - RequestHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "big"}, - }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "big"}, - }, - }, - { - Weight: 3, - Service: "goldilocks-side", - RequestHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "goldilocks"}, - }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "goldilocks"}, - }, - }, - { - Weight: 0.5, - Service: "lil-bit-side", - RequestHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "small"}, - }, - ResponseHeaders: &structs.HTTPHeaderModifiers{ - Set: map[string]string{"x-split-leg": "small"}, - }, - }, - }, - }, - ) - case "splitter-overweight": entries = append(entries, &structs.ServiceResolverConfigEntry{ Kind: structs.ServiceResolver, @@ -653,7 +550,7 @@ func setupTestVariationDiscoveryChain( EnterpriseMeta: entMeta, Splits: []structs.ServiceSplit{ { - Weight: 100.0, + Weight: 95.5, Service: "big-side", RequestHeaders: &structs.HTTPHeaderModifiers{ Set: map[string]string{"x-split-leg": "big"}, @@ -663,7 +560,7 @@ func setupTestVariationDiscoveryChain( }, }, { - Weight: 100.0, + Weight: 4, Service: "goldilocks-side", RequestHeaders: &structs.HTTPHeaderModifiers{ Set: map[string]string{"x-split-leg": "goldilocks"}, @@ -673,7 +570,7 @@ func setupTestVariationDiscoveryChain( }, }, { - Weight: 100.0, + Weight: 0.5, Service: "lil-bit-side", RequestHeaders: &structs.HTTPHeaderModifiers{ Set: map[string]string{"x-split-leg": "small"}, @@ -1025,76 +922,12 @@ func setupTestVariationDiscoveryChain( Field: "header", FieldValue: "x-user-id", }, - { - Field: "query_parameter", - FieldValue: "my-pretty-param", - }, { SourceIP: true, Terminal: true, }, }, }, - }) - case "redirect-to-lb-node": - entries = append(entries, - &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: structs.ProxyConfigGlobal, - EnterpriseMeta: entMeta, - Protocol: "http", - Config: map[string]interface{}{ - "protocol": "http", - }, - }, - &structs.ServiceRouterConfigEntry{ - Kind: structs.ServiceRouter, - Name: "db", - EnterpriseMeta: entMeta, - Routes: []structs.ServiceRoute{ - { - Match: httpMatch(&structs.ServiceRouteHTTPMatch{ - PathPrefix: "/web", - }), - 
Destination: toService("web"), - }, - }, - }, - &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "web", - EnterpriseMeta: entMeta, - LoadBalancer: &structs.LoadBalancer{ - Policy: "ring_hash", - RingHashConfig: &structs.RingHashConfig{ - MinimumRingSize: 20, - MaximumRingSize: 30, - }, - }, - }, - ) - case "resolver-with-lb": - entries = append(entries, - &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: structs.ProxyConfigGlobal, - EnterpriseMeta: entMeta, - Protocol: "http", - Config: map[string]interface{}{ - "protocol": "http", - }, - }, - &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "db", - EnterpriseMeta: entMeta, - LoadBalancer: &structs.LoadBalancer{ - Policy: "ring_hash", - RingHashConfig: &structs.RingHashConfig{ - MinimumRingSize: 20, - MaximumRingSize: 30, - }, - }, }, ) default: diff --git a/agent/proxycfg/testing_upstreams_ce.go b/agent/proxycfg/testing_upstreams_ce.go index bac9bb5ce6d72..3b8e22d0bda8b 100644 --- a/agent/proxycfg/testing_upstreams_ce.go +++ b/agent/proxycfg/testing_upstreams_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package proxycfg diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index 209a3446d981e..21cbe1e81ff23 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package proxycfg diff --git a/agent/proxycfg_test.go b/agent/proxycfg_test.go index c8141e407fafa..334af2cca0acc 100644 --- a/agent/proxycfg_test.go +++ b/agent/proxycfg_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -13,11 +13,9 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/grpc-external/limiter" + "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" "github.com/hashicorp/consul/testrpc" ) @@ -54,7 +52,7 @@ func TestAgent_local_proxycfg(t *testing.T) { // This is a little gross, but this gives us the layered pair of // local/catalog sources for now. - cfg := a.xdsServer.ProxyWatcher + cfg := a.xdsServer.CfgSrc var ( timer = time.After(100 * time.Millisecond) @@ -64,9 +62,9 @@ func TestAgent_local_proxycfg(t *testing.T) { var ( firstTime = true - ch <-chan proxysnapshot.ProxySnapshot + ch <-chan *proxycfg.ConfigSnapshot stc limiter.SessionTerminatedChan - cancel proxysnapshot.CancelFunc + cancel proxycfg.CancelFunc ) defer func() { if cancel != nil { @@ -87,7 +85,7 @@ func TestAgent_local_proxycfg(t *testing.T) { // Prior to fixes in https://github.com/hashicorp/consul/pull/16497 // this call to Watch() would deadlock. 
var err error - ch, stc, cancel, err = cfg.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, sid.ID).ID(), a.config.NodeName, token) + ch, stc, cancel, err = cfg.Watch(sid, a.config.NodeName, token) require.NoError(t, err) } select { diff --git a/agent/reload.go b/agent/reload.go index cf68481621bc5..ce31fd1a76c1c 100644 --- a/agent/reload.go +++ b/agent/reload.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/remote_exec.go b/agent/remote_exec.go index 876c1898620cd..770221ed2622f 100644 --- a/agent/remote_exec.go +++ b/agent/remote_exec.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/remote_exec_test.go b/agent/remote_exec_test.go index 2488f6b4f2e07..9994095078d16 100644 --- a/agent/remote_exec_test.go +++ b/agent/remote_exec_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/go-uuid" @@ -359,9 +358,9 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string retry.Run(t, func(r *retry.R) { event := &remoteExecEvent{ Prefix: "_rexec", - Session: makeRexecSession(r, a.Agent, ""), + Session: makeRexecSession(t, a.Agent, ""), } - defer destroySession(r, a.Agent, event.Session, "") + defer destroySession(t, a.Agent, event.Session, "") spec := &remoteExecSpec{ Command: command, @@ -430,7 +429,7 @@ func TestHandleRemoteExecFailed(t *testing.T) { testHandleRemoteExec(t, "echo failing;exit 2", "failing", "2") } -func makeRexecSession(t testutil.TestingTB, a *Agent, token string) string { +func makeRexecSession(t *testing.T, a *Agent, token string) string { args := structs.SessionRequest{ Datacenter: a.config.Datacenter, Op: structs.SessionCreate, @@ -449,7 +448,7 @@ func makeRexecSession(t testutil.TestingTB, a *Agent, token string) string { return out } -func destroySession(t testutil.TestingTB, a *Agent, session string, token string) { +func destroySession(t *testing.T, a *Agent, session string, token string) { args := structs.SessionRequest{ Datacenter: a.config.Datacenter, Op: structs.SessionDestroy, diff --git a/agent/retry_join.go b/agent/retry_join.go index eb010c0c22c47..a629aa04706e8 100644 --- a/agent/retry_join.go +++ b/agent/retry_join.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/retry_join_test.go b/agent/retry_join_test.go index 4184ab0a9f3df..af90205965b21 100644 --- a/agent/retry_join_test.go +++ b/agent/retry_join_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/router/grpc.go b/agent/router/grpc.go index ce3f079e86b97..9fe6355d4dcf1 100644 --- a/agent/router/grpc.go +++ b/agent/router/grpc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/router/manager.go b/agent/router/manager.go index 07d55127f3c8b..cccbc27d081a5 100644 --- a/agent/router/manager.go +++ b/agent/router/manager.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 // Package servers provides a Manager interface for Manager managed // metadata.Server objects. The servers package manages servers from a Consul diff --git a/agent/router/manager_internal_test.go b/agent/router/manager_internal_test.go index 120a5f012c630..0e1fa28189a6d 100644 --- a/agent/router/manager_internal_test.go +++ b/agent/router/manager_internal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/router/manager_test.go b/agent/router/manager_test.go index 6490164fda10c..708bb620a0da0 100644 --- a/agent/router/manager_test.go +++ b/agent/router/manager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router_test diff --git a/agent/router/router.go b/agent/router/router.go index c261b6ed7cd52..bdba22f42d41b 100644 --- a/agent/router/router.go +++ b/agent/router/router.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/router/router_test.go b/agent/router/router_test.go index 206b0befe811c..1064dea342a32 100644 --- a/agent/router/router_test.go +++ b/agent/router/router_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/router/serf_adapter.go b/agent/router/serf_adapter.go index d3a228ca3d5d7..f30449dc05dff 100644 --- a/agent/router/serf_adapter.go +++ b/agent/router/serf_adapter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/router/serf_flooder.go b/agent/router/serf_flooder.go index 06d59d5c4a89d..34ef318377faa 100644 --- a/agent/router/serf_flooder.go +++ b/agent/router/serf_flooder.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package router diff --git a/agent/routine-leak-checker/leak_test.go b/agent/routine-leak-checker/leak_test.go index f6b3c2a74953c..91d84b071b3f3 100644 --- a/agent/routine-leak-checker/leak_test.go +++ b/agent/routine-leak-checker/leak_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package leakcheck diff --git a/agent/rpc/middleware/interceptors.go b/agent/rpc/middleware/interceptors.go index e783254a980d1..f614e06cea768 100644 --- a/agent/rpc/middleware/interceptors.go +++ b/agent/rpc/middleware/interceptors.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware @@ -12,10 +12,9 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/consul-net-rpc/net/rpc" rpcRate "github.com/hashicorp/consul/agent/consul/rate" + "github.com/hashicorp/go-hclog" ) // RPCTypeInternal identifies the "RPC" request as coming from some internal @@ -26,11 +25,9 @@ import ( // Really what we are measuring here is a "cluster operation". The term we have // used for this historically is "RPC", so we continue to use that here. const RPCTypeInternal = "internal" - const RPCTypeNetRPC = "net/rpc" var metricRPCRequest = []string{"rpc", "server", "call"} - var requestLogName = strings.Join(metricRPCRequest, "_") var OneTwelveRPCSummary = []prometheus.SummaryDefinition{ @@ -189,20 +186,3 @@ func GetNetRPCRateLimitingInterceptor(requestLimitsHandler rpcRate.RequestLimits return requestLimitsHandler.Allow(op) } } - -func ChainedRPCPreBodyInterceptor(chain ...rpc.PreBodyInterceptor) rpc.PreBodyInterceptor { - if len(chain) == 0 { - panic("don't call this with zero interceptors") - } - if len(chain) == 1 { - return chain[0] - } - return func(reqServiceMethod string, sourceAddr net.Addr) error { - for _, interceptor := range chain { - if err := interceptor(reqServiceMethod, sourceAddr); err != nil { - return err - } - } - return nil - } -} diff --git a/agent/rpc/middleware/interceptors_test.go b/agent/rpc/middleware/interceptors_test.go index a8e07c8d4d2ba..a22837fc6d952 100644 --- a/agent/rpc/middleware/interceptors_test.go +++ b/agent/rpc/middleware/interceptors_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/rpc/middleware/rate_limit_mappings.go b/agent/rpc/middleware/rate_limit_mappings.go index f9ca6a3333018..0df249c932338 100644 --- a/agent/rpc/middleware/rate_limit_mappings.go +++ b/agent/rpc/middleware/rate_limit_mappings.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/rpc/middleware/recovery.go b/agent/rpc/middleware/recovery.go index 6c23eb3ed3fa6..df37f969d4120 100644 --- a/agent/rpc/middleware/recovery.go +++ b/agent/rpc/middleware/recovery.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package middleware diff --git a/agent/rpc/operator/service.go b/agent/rpc/operator/service.go index ec88c5aa04a99..6b3302c9f2e46 100644 --- a/agent/rpc/operator/service.go +++ b/agent/rpc/operator/service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package operator @@ -94,8 +94,8 @@ func requireNotNil(v interface{}, name string) { var _ pboperator.OperatorServiceServer = (*Server)(nil) -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pboperator.RegisterOperatorServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pboperator.RegisterOperatorServiceServer(grpcServer, s) } // Backend defines the core integrations the Operator endpoint depends on. 
A diff --git a/agent/rpc/operator/service_test.go b/agent/rpc/operator/service_test.go index 3cc9e117d4425..465a6d6428d29 100644 --- a/agent/rpc/operator/service_test.go +++ b/agent/rpc/operator/service_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package operator diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 2c6655be6682f..5c4e530e060ec 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peering @@ -121,8 +121,8 @@ func requireNotNil(v interface{}, name string) { var _ pbpeering.PeeringServiceServer = (*Server)(nil) -func (s *Server) Register(registrar grpc.ServiceRegistrar) { - pbpeering.RegisterPeeringServiceServer(registrar, s) +func (s *Server) Register(grpcServer *grpc.Server) { + pbpeering.RegisterPeeringServiceServer(grpcServer, s) } // Backend defines the core integrations the Peering endpoint depends on. A diff --git a/agent/rpc/peering/service_ce_test.go b/agent/rpc/peering/service_ce_test.go index b30a0ff59f7b0..d4e5fab0ba40e 100644 --- a/agent/rpc/peering/service_ce_test.go +++ b/agent/rpc/peering/service_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package peering_test diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index efc3bff697bb7..9fd6b06a163ab 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peering_test @@ -1833,9 +1833,9 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer { conf.ACLResolverSettings.EnterpriseMeta = *conf.AgentEnterpriseMeta() deps := newDefaultDeps(t, conf) - externalGRPCServer := external.NewServer(deps.Logger, nil, deps.TLSConfigurator, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil) + externalGRPCServer := external.NewServer(deps.Logger, nil, deps.TLSConfigurator, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}) - server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger, nil) + server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, server.Shutdown()) @@ -1967,7 +1967,6 @@ func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { NewRequestRecorderFunc: middleware.NewRequestRecorder, GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, XDSStreamLimiter: limiter.NewSessionLimiter(), - Registry: consul.NewTypeRegistry(), } } diff --git a/agent/rpc/peering/testing.go b/agent/rpc/peering/testing.go index 8989950ee29be..ddd9d43a8ad27 100644 --- a/agent/rpc/peering/testing.go +++ b/agent/rpc/peering/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peering diff --git a/agent/rpc/peering/testutil_ce_test.go b/agent/rpc/peering/testutil_ce_test.go index 7d9a0c286d265..d15d62e9f1cb1 100644 --- a/agent/rpc/peering/testutil_ce_test.go +++ b/agent/rpc/peering/testutil_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package peering_test diff --git a/agent/rpc/peering/validate.go b/agent/rpc/peering/validate.go index 2de6684d85ff9..1bd3f393bd748 100644 --- a/agent/rpc/peering/validate.go +++ b/agent/rpc/peering/validate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peering diff --git a/agent/rpc/peering/validate_test.go b/agent/rpc/peering/validate_test.go index 669baf41702fc..c5b3c6c7bdb08 100644 --- a/agent/rpc/peering/validate_test.go +++ b/agent/rpc/peering/validate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package peering diff --git a/agent/rpcclient/common.go b/agent/rpcclient/common.go index 316fb341a9356..8ff1573992362 100644 --- a/agent/rpcclient/common.go +++ b/agent/rpcclient/common.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package rpcclient diff --git a/agent/rpcclient/configentry/configentry.go b/agent/rpcclient/configentry/configentry.go index 2b38455beb074..ada7928dc1af7 100644 --- a/agent/rpcclient/configentry/configentry.go +++ b/agent/rpcclient/configentry/configentry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/rpcclient/configentry/configentry_test.go b/agent/rpcclient/configentry/configentry_test.go index 92e6f4b3c88ae..9f526892fd11a 100644 --- a/agent/rpcclient/configentry/configentry_test.go +++ b/agent/rpcclient/configentry/configentry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/rpcclient/configentry/view.go b/agent/rpcclient/configentry/view.go index 70271a9220e97..dae3208810bfa 100644 --- a/agent/rpcclient/configentry/view.go +++ b/agent/rpcclient/configentry/view.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/rpcclient/configentry/view_test.go b/agent/rpcclient/configentry/view_test.go index 0209c898cafed..37e642e5c36c1 100644 --- a/agent/rpcclient/configentry/view_test.go +++ b/agent/rpcclient/configentry/view_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package configentry diff --git a/agent/rpcclient/health/health.go b/agent/rpcclient/health/health.go index f062d2aac284e..8a65a50578aad 100644 --- a/agent/rpcclient/health/health.go +++ b/agent/rpcclient/health/health.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package health diff --git a/agent/rpcclient/health/health_test.go b/agent/rpcclient/health/health_test.go index 30900bc04cc01..2d8c57a3beba5 100644 --- a/agent/rpcclient/health/health_test.go +++ b/agent/rpcclient/health/health_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package health diff --git a/agent/rpcclient/health/streaming_test.go b/agent/rpcclient/health/streaming_test.go index 180b61f0eec65..3a0ba734ba9ab 100644 --- a/agent/rpcclient/health/streaming_test.go +++ b/agent/rpcclient/health/streaming_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package health diff --git a/agent/rpcclient/health/view.go b/agent/rpcclient/health/view.go index 8e08ba801e5f1..e1fffd3e23de6 100644 --- a/agent/rpcclient/health/view.go +++ b/agent/rpcclient/health/view.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package health diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index 83eba5ab41a07..6b4b8ee85798d 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package health diff --git a/agent/service_checks_test.go b/agent/service_checks_test.go index 41372cc47dbbe..c567776587e36 100644 --- a/agent/service_checks_test.go +++ b/agent/service_checks_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/service_manager.go b/agent/service_manager.go index 355c73eb2c412..b7e38b393ace2 100644 --- a/agent/service_manager.go +++ b/agent/service_manager.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -148,8 +148,7 @@ func (w *serviceConfigWatch) register(ctx context.Context) error { // Merge the local registration with the central defaults and update this service // in the local state. - ns := w.registration.Service.WithNormalizedUpstreams() - merged, err := configentry.MergeServiceConfig(serviceDefaults, ns) + merged, err := configentry.MergeServiceConfig(serviceDefaults, w.registration.Service) if err != nil { return err } @@ -279,8 +278,7 @@ func (w *serviceConfigWatch) handleUpdate(ctx context.Context, event cache.Updat // Merge the local registration with the central defaults and update this service // in the local state. - ns := w.registration.Service.WithNormalizedUpstreams() - merged, err := configentry.MergeServiceConfig(serviceDefaults, ns) + merged, err := configentry.MergeServiceConfig(serviceDefaults, w.registration.Service) if err != nil { return err } diff --git a/agent/service_manager_test.go b/agent/service_manager_test.go index 289503a51ede4..724022d42ad90 100644 --- a/agent/service_manager_test.go +++ b/agent/service_manager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/session_endpoint.go b/agent/session_endpoint.go index 90c3fa32bae7e..a9b9a6dee6baa 100644 --- a/agent/session_endpoint.go +++ b/agent/session_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/session_endpoint_test.go b/agent/session_endpoint_test.go index d44285f29bb78..5ce93db7a68fe 100644 --- a/agent/session_endpoint_test.go +++ b/agent/session_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -15,14 +15,13 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" "github.com/stretchr/testify/require" ) -func verifySession(t testutil.TestingTB, a *TestAgent, want structs.Session) { +func verifySession(t *testing.T, r *retry.R, a *TestAgent, want structs.Session) { t.Helper() args := &structs.SessionSpecificRequest{ @@ -31,10 +30,10 @@ func verifySession(t testutil.TestingTB, a *TestAgent, want structs.Session) { } var out structs.IndexedSessions if err := a.RPC(context.Background(), "Session.Get", args, &out); err != nil { - t.Fatalf("err: %v", err) + r.Fatalf("err: %v", err) } if len(out.Sessions) != 1 { - t.Fatalf("bad: %#v", out.Sessions) + r.Fatalf("bad: %#v", out.Sessions) } // Make a copy so we don't modify the state store copy for an in-mem @@ -124,7 +123,7 @@ func TestSessionCreate(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) } @@ -189,7 +188,7 @@ func TestSessionCreate_NodeChecks(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) } @@ -251,7 +250,7 @@ func TestSessionCreate_Delete(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysDelete, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) } @@ -289,7 +288,7 @@ func TestSessionCreate_DefaultCheck(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) } @@ -330,7 +329,7 @@ func TestSessionCreate_NoCheck(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) }) @@ -360,7 +359,7 @@ func TestSessionCreate_NoCheck(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) }) @@ -392,7 +391,7 @@ func TestSessionCreate_NoCheck(t *testing.T) { LockDelay: 20 * time.Second, Behavior: structs.SessionKeysRelease, } - verifySession(r, a, want) + verifySession(t, r, a, want) }) }) } @@ -431,7 +430,7 @@ func makeTestSessionDelete(t *testing.T, srv *HTTPHandlers) string { return sessResp.ID } -func makeTestSessionTTL(t testutil.TestingTB, srv *HTTPHandlers, ttl string) string { +func makeTestSessionTTL(t *testing.T, srv *HTTPHandlers, ttl string) string { t.Helper() // Create Session with TTL body := bytes.NewBuffer(nil) @@ -489,7 +488,7 @@ func TestSessionCustomTTL(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") retry.Run(t, func(r *retry.R) { - id := makeTestSessionTTL(r, a.srv, ttl.String()) + id := makeTestSessionTTL(t, a.srv, ttl.String()) req, _ := http.NewRequest("GET", "/v1/session/info/"+id, nil) resp := httptest.NewRecorder() diff --git a/agent/setup.go b/agent/setup.go index 772cdfbcd55bf..88c883f9bdb71 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -28,7 +28,6 @@ import ( "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/consul/usagemetrics" "github.com/hashicorp/consul/agent/consul/xdscapacity" - "github.com/hashicorp/consul/agent/discovery" "github.com/hashicorp/consul/agent/grpc-external/limiter" grpcInt "github.com/hashicorp/consul/agent/grpc-internal" "github.com/hashicorp/consul/agent/grpc-internal/balancer" @@ -138,15 +137,17 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl cfg.Telemetry.PrometheusOpts.SummaryDefinitions = summaries var extraSinks []metrics.MetricSink - // This values is set late within newNodeIDFromConfig above - cfg.Cloud.NodeID = cfg.NodeID + if cfg.IsCloudEnabled() { + // This values is set late within newNodeIDFromConfig above + cfg.Cloud.NodeID = cfg.NodeID - d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp"), cfg.DataDir) - if err != nil { - return d, err - } - if d.HCP.Sink != nil { - extraSinks = append(extraSinks, d.HCP.Sink) + d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp")) + if err != nil { + return d, err + } + if d.HCP.Sink != nil { + extraSinks = append(extraSinks, d.HCP.Sink) + } } d.MetricsConfig, err = lib.InitTelemetry(cfg.Telemetry, d.Logger, extraSinks...) @@ -184,8 +185,6 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl TestOverrideCAChangeInitialDelay: cfg.ConnectTestCALeafRootChangeSpread, }, }) - // Set the leaf cert manager in the embedded deps type so it can be used by consul servers. - d.Deps.LeafCertManager = d.LeafCertManager agentType := "client" if cfg.ServerMode { @@ -261,8 +260,6 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl d.XDSStreamLimiter = limiter.NewSessionLimiter() - d.Registry = consul.NewTypeRegistry() - return d, nil } @@ -272,9 +269,6 @@ func (bd BaseDeps) Close() { bd.AutoConfig.Stop() bd.LeafCertManager.Stop() bd.MetricsConfig.Cancel() - if bd.HCP.Sink != nil { - bd.HCP.Sink.Shutdown() - } for _, fn := range []func(){bd.deregisterBalancer, bd.deregisterResolver, bd.stopHostCollector} { if fn != nil { @@ -383,9 +377,7 @@ func getPrometheusDefs(cfg *config.RuntimeConfig, isServer bool) ([]prometheus.G gauges = append(gauges, verifierGauges) } - if isServer && - (cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendWAL || - cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendDefault) { + if isServer && cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendWAL { walGauges := make([]prometheus.GaugeDefinition, 0) for _, d := range wal.MetricDefinitions.Gauges { @@ -435,7 +427,6 @@ func getPrometheusDefs(cfg *config.RuntimeConfig, isServer bool) ([]prometheus.G consul.CatalogCounters, consul.ClientCounters, consul.RPCCounters, - discovery.DNSCounters, grpcWare.StatsCounters, local.StateCounters, xds.StatsCounters, @@ -457,9 +448,7 @@ func getPrometheusDefs(cfg *config.RuntimeConfig, isServer bool) ([]prometheus.G } counters = append(counters, verifierCounters) } - if isServer && - (cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendWAL || - cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendDefault) { + if isServer && cfg.RaftLogStoreConfig.Backend == consul.LogStoreBackendWAL { walCounters := make([]prometheus.CounterDefinition, 0) for _, d := range wal.MetricDefinitions.Counters { walCounters = append(walCounters, prometheus.CounterDefinition{ diff --git a/agent/setup_ce.go b/agent/setup_ce.go index 
1269e5282c021..46c4b80eb4efb 100644 --- a/agent/setup_ce.go +++ b/agent/setup_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/sidecar_service.go b/agent/sidecar_service.go index 8e57d5930bc11..7dfb067b50ef0 100644 --- a/agent/sidecar_service.go +++ b/agent/sidecar_service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/sidecar_service_test.go b/agent/sidecar_service_test.go index fd39a5a284a10..4960dd73d0542 100644 --- a/agent/sidecar_service_test.go +++ b/agent/sidecar_service_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/signal_unix.go b/agent/signal_unix.go index 173a82b96bd09..bd0b3e7793cdd 100644 --- a/agent/signal_unix.go +++ b/agent/signal_unix.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !windows +// +build !windows package agent diff --git a/agent/signal_windows.go b/agent/signal_windows.go index dc6d3cc5461a9..c6ea0c980ffb6 100644 --- a/agent/signal_windows.go +++ b/agent/signal_windows.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build windows +// +build windows package agent diff --git a/agent/snapshot_endpoint.go b/agent/snapshot_endpoint.go index 06805ae5f8c19..60d986256433b 100644 --- a/agent/snapshot_endpoint.go +++ b/agent/snapshot_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/snapshot_endpoint_test.go b/agent/snapshot_endpoint_test.go index 31d731a26ef07..340cee392d829 100644 --- a/agent/snapshot_endpoint_test.go +++ b/agent/snapshot_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/status_endpoint.go b/agent/status_endpoint.go index 4a40ec8910270..86f9f1a5d0193 100644 --- a/agent/status_endpoint.go +++ b/agent/status_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/status_endpoint_test.go b/agent/status_endpoint_test.go index db231fbc9b834..5be9d6be64a3e 100644 --- a/agent/status_endpoint_test.go +++ b/agent/status_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/streaming_test.go b/agent/streaming_test.go index fed1e8126d41d..9074f66e83441 100644 --- a/agent/streaming_test.go +++ b/agent/streaming_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/structs/acl.go b/agent/structs/acl.go index d856ce0af2eaf..e611d22b89545 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -9,11 +9,11 @@ import ( "errors" "fmt" "hash" + "hash/fnv" "sort" "strings" "time" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" "golang.org/x/crypto/blake2b" @@ -63,10 +63,6 @@ agent_prefix "" { event_prefix "" { policy = "%[1]s" } -identity_prefix "" { - policy = "%[1]s" - intentions = "%[1]s" -} key_prefix "" { policy = "%[1]s" } @@ -130,7 +126,6 @@ type ACLIdentity interface { RoleIDs() []string ServiceIdentityList() []*ACLServiceIdentity NodeIdentityList() []*ACLNodeIdentity - TemplatedPolicyList() []*ACLTemplatedPolicy IsExpired(asOf time.Time) bool IsLocal() bool EnterpriseMetadata() *acl.EnterpriseMeta @@ -182,20 +177,22 @@ func (s *ACLServiceIdentity) EstimateSize() int { } func (s *ACLServiceIdentity) SyntheticPolicy(entMeta *acl.EnterpriseMeta) *ACLPolicy { - // use templated policy to generate synthetic policy - templatedPolicy := ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyServiceID, - TemplateName: api.ACLTemplatedPolicyServiceName, - Datacenters: s.Datacenters, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: s.ServiceName, - }, - } - // Given that we validate this string name before persisting, we do not - // expect any errors from generating the synthetic policy - policy, _ := templatedPolicy.SyntheticPolicy(entMeta) - + // have to escape it before doing the following interpolation. + rules := aclServiceIdentityRules(s.ServiceName, entMeta) + + hasher := fnv.New128a() + hashID := fmt.Sprintf("%x", hasher.Sum([]byte(rules))) + + policy := &ACLPolicy{} + policy.ID = hashID + policy.Name = fmt.Sprintf("synthetic-policy-%s", hashID) + sn := NewServiceName(s.ServiceName, entMeta) + policy.Description = fmt.Sprintf("synthetic policy for service identity %q", sn.String()) + policy.Rules = rules + policy.Datacenters = s.Datacenters + policy.EnterpriseMeta.Merge(entMeta) + policy.SetHash(true) return policy } @@ -252,20 +249,21 @@ func (s *ACLNodeIdentity) EstimateSize() int { } func (s *ACLNodeIdentity) SyntheticPolicy(entMeta *acl.EnterpriseMeta) *ACLPolicy { - // use templated policy to generate synthetic policy - templatedPolicy := ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyNodeID, - TemplateName: api.ACLTemplatedPolicyNodeName, - Datacenters: []string{s.Datacenter}, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: s.NodeName, - }, - } - // Given that we validate this string name before persisting, we do not - // expect any errors from generating the synthetic policy - policy, _ := templatedPolicy.SyntheticPolicy(entMeta) - + // have to escape it before doing the following interpolation. + rules := aclNodeIdentityRules(s.NodeName, entMeta) + + hasher := fnv.New128a() + hashID := fmt.Sprintf("%x", hasher.Sum([]byte(rules))) + + policy := &ACLPolicy{} + policy.ID = hashID + policy.Name = fmt.Sprintf("synthetic-policy-%s", hashID) + policy.Description = fmt.Sprintf("synthetic policy for node identity %q", s.NodeName) + policy.Rules = rules + policy.Datacenters = []string{s.Datacenter} + policy.EnterpriseMeta.Merge(entMeta) + policy.SetHash(true) return policy } @@ -316,9 +314,6 @@ type ACLToken struct { // The node identities that this token should be allowed to manage. NodeIdentities ACLNodeIdentities `json:",omitempty"` - // The templated policies to generate synthetic policies for. - TemplatedPolicies ACLTemplatedPolicies `json:",omitempty"` - // Whether this token is DC local. 
This means that it will not be synced // to the ACL datacenter and replicated to others. Local bool @@ -399,7 +394,6 @@ func (t *ACLToken) Clone() *ACLToken { t2.Roles = nil t2.ServiceIdentities = nil t2.NodeIdentities = nil - t2.TemplatedPolicies = nil if len(t.Policies) > 0 { t2.Policies = make([]ACLTokenPolicyLink, len(t.Policies)) @@ -421,12 +415,6 @@ func (t *ACLToken) Clone() *ACLToken { t2.NodeIdentities[i] = n.Clone() } } - if len(t.TemplatedPolicies) > 0 { - t2.TemplatedPolicies = make([]*ACLTemplatedPolicy, len(t.TemplatedPolicies)) - for idx, tp := range t.TemplatedPolicies { - t2.TemplatedPolicies[idx] = tp.Clone() - } - } return &t2 } @@ -535,10 +523,6 @@ func (t *ACLToken) SetHash(force bool) []byte { nodeID.AddToHash(hash) } - for _, templatedPolicy := range t.TemplatedPolicies { - templatedPolicy.AddToHash(hash) - } - t.EnterpriseMeta.AddToHash(hash, false) // Finalize the hash @@ -565,9 +549,6 @@ func (t *ACLToken) EstimateSize() int { for _, nodeID := range t.NodeIdentities { size += nodeID.EstimateSize() } - for _, templatedPolicy := range t.TemplatedPolicies { - size += templatedPolicy.EstimateSize() - } return size + t.EnterpriseMeta.EstimateSize() } @@ -582,7 +563,6 @@ type ACLTokenListStub struct { Roles []ACLTokenRoleLink `json:",omitempty"` ServiceIdentities ACLServiceIdentities `json:",omitempty"` NodeIdentities ACLNodeIdentities `json:",omitempty"` - TemplatedPolicies ACLTemplatedPolicies `json:",omitempty"` Local bool AuthMethod string `json:",omitempty"` ExpirationTime *time.Time `json:",omitempty"` @@ -605,7 +585,6 @@ func (token *ACLToken) Stub() *ACLTokenListStub { Roles: token.Roles, ServiceIdentities: token.ServiceIdentities, NodeIdentities: token.NodeIdentities, - TemplatedPolicies: token.TemplatedPolicies, Local: token.Local, AuthMethod: token.AuthMethod, ExpirationTime: token.ExpirationTime, @@ -891,9 +870,6 @@ type ACLRole struct { // List of nodes to generate synthetic policies for. NodeIdentities ACLNodeIdentities `json:",omitempty"` - // List of templated policies to generate synthethic policies for. - TemplatedPolicies ACLTemplatedPolicies `json:",omitempty"` - // Hash of the contents of the role // This does not take into account the ID (which is immutable) // nor the raft metadata. 
@@ -933,7 +909,6 @@ func (r *ACLRole) Clone() *ACLRole { r2.Policies = nil r2.ServiceIdentities = nil r2.NodeIdentities = nil - r2.TemplatedPolicies = nil if len(r.Policies) > 0 { r2.Policies = make([]ACLRolePolicyLink, len(r.Policies)) @@ -951,12 +926,6 @@ func (r *ACLRole) Clone() *ACLRole { r2.NodeIdentities[i] = n.Clone() } } - if len(r.TemplatedPolicies) > 0 { - r2.TemplatedPolicies = make([]*ACLTemplatedPolicy, len(r.TemplatedPolicies)) - for i, n := range r.TemplatedPolicies { - r2.TemplatedPolicies[i] = n.Clone() - } - } return &r2 } @@ -988,9 +957,6 @@ func (r *ACLRole) SetHash(force bool) []byte { for _, nodeID := range r.NodeIdentities { nodeID.AddToHash(hash) } - for _, templatedPolicy := range r.TemplatedPolicies { - templatedPolicy.AddToHash(hash) - } r.EnterpriseMeta.AddToHash(hash, false) @@ -1018,9 +984,6 @@ func (r *ACLRole) EstimateSize() int { for _, nodeID := range r.NodeIdentities { size += nodeID.EstimateSize() } - for _, templatedPolicy := range r.TemplatedPolicies { - size += templatedPolicy.EstimateSize() - } return size + r.EnterpriseMeta.EstimateSize() } @@ -1069,36 +1032,6 @@ const ( // } // } BindingRuleBindTypeNode = "node" - - // BindingRuleBindTypeTemplatedPolicy is the binding rule bind type that - // assigns a TemplatedPolicy to the token that is created using the value - // of the computed BindVars as template variables and BindName as template name like: - // - // &ACLToken{ - // ...other fields... - // TemplatedPolicies: []*ACLTemplatedPolicy{ - // &ACLTemplatedPolicy{ - // TemplateName: "", - // TemplateVariables: &ACLTemplatedPolicyVariables{} - // }, - // }, - // } - BindingRuleBindTypeTemplatedPolicy = "templated-policy" - - // BindingRuleBindTypePolicy is the binding rule bind type that only allows - // the binding rule to function if a policy with the given name (BindName) - // exists at login-time. If it does the token that is created is directly - // linked to that policy like: - // - // &ACLToken{ - // ...other fields... - // Policies: *ACLTokenPolicyLink{ - // { Name: "" }, - // } - // } - // - // If it does not exist at login-time the rule is ignored. - BindingRuleBindTypePolicy = "policy" ) type ACLBindingRule struct { @@ -1118,11 +1051,8 @@ type ACLBindingRule struct { // BindType adjusts how this binding rule is applied at login time. The // valid values are: // - // - BindingRuleBindTypeService = "service" - // - BindingRuleBindTypeNode = "node" - // - BindingRuleBindTypeRole = "role" - // - BindingRuleBindTypePolicy = "policy" - // - BindingRuleBindTypeTemplatedPolicy = "templated-policy" + // - BindingRuleBindTypeService = "service" + // - BindingRuleBindTypeRole = "role" BindType string // BindName is the target of the binding. Can be lightly templated using @@ -1130,10 +1060,6 @@ type ACLBindingRule struct { // upon the BindType. BindName string - // BindVars is a the variables used when binding rule type is `templated-policy`. Can be lightly - // templated using HIL ${foo} syntax from available field names. 
- BindVars *ACLTemplatedPolicyVariables `json:",omitempty"` - // Embedded Enterprise ACL metadata acl.EnterpriseMeta `mapstructure:",squash"` @@ -1398,7 +1324,7 @@ type ACLTokenListResponse struct { } // ACLTokenBatchGetRequest is used for reading multiple tokens, this is -// different from the token list request in that only tokens with the +// different from the the token list request in that only tokens with the // the requested ids are returned type ACLTokenBatchGetRequest struct { AccessorIDs []string // List of accessor ids to fetch @@ -1919,10 +1845,6 @@ func (id *AgentRecoveryTokenIdentity) NodeIdentityList() []*ACLNodeIdentity { return nil } -func (id *AgentRecoveryTokenIdentity) TemplatedPolicyList() []*ACLTemplatedPolicy { - return nil -} - func (id *AgentRecoveryTokenIdentity) IsExpired(asOf time.Time) bool { return false } @@ -1971,10 +1893,6 @@ func (i *ACLServerIdentity) NodeIdentityList() []*ACLNodeIdentity { return nil } -func (i *ACLServerIdentity) TemplatedPolicyList() []*ACLTemplatedPolicy { - return nil -} - func (i *ACLServerIdentity) IsExpired(asOf time.Time) bool { return false } diff --git a/agent/structs/acl_cache.go b/agent/structs/acl_cache.go index 15f3a2edc3c26..46d4fdd28117b 100644 --- a/agent/structs/acl_cache.go +++ b/agent/structs/acl_cache.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/acl_cache_test.go b/agent/structs/acl_cache_test.go index 57e218ff21e32..e390da960f4ed 100644 --- a/agent/structs/acl_cache_test.go +++ b/agent/structs/acl_cache_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/acl_ce.go b/agent/structs/acl_ce.go index 166558ae65af8..9cc4e7813ce85 100644 --- a/agent/structs/acl_ce.go +++ b/agent/structs/acl_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/acl_templated_policy.go b/agent/structs/acl_templated_policy.go deleted file mode 100644 index 04c85515ff1d8..0000000000000 --- a/agent/structs/acl_templated_policy.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "bytes" - _ "embed" - "fmt" - "hash" - "hash/fnv" - "html/template" - - "github.com/hashicorp/go-multierror" - "github.com/xeipuuv/gojsonschema" - "golang.org/x/exp/slices" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/lib/stringslice" -) - -//go:embed acltemplatedpolicy/schemas/node.json -var ACLTemplatedPolicyNodeSchema string - -//go:embed acltemplatedpolicy/schemas/service.json -var ACLTemplatedPolicyServiceSchema string - -//go:embed acltemplatedpolicy/schemas/workload-identity.json -var ACLTemplatedPolicyWorkloadIdentitySchema string - -//go:embed acltemplatedpolicy/schemas/api-gateway.json -var ACLTemplatedPolicyAPIGatewaySchema string - -type ACLTemplatedPolicies []*ACLTemplatedPolicy - -const ( - ACLTemplatedPolicyServiceID = "00000000-0000-0000-0000-000000000003" - ACLTemplatedPolicyNodeID = "00000000-0000-0000-0000-000000000004" - ACLTemplatedPolicyDNSID = "00000000-0000-0000-0000-000000000005" - ACLTemplatedPolicyNomadServerID = "00000000-0000-0000-0000-000000000006" - ACLTemplatedPolicyWorkloadIdentityID = "00000000-0000-0000-0000-000000000007" - ACLTemplatedPolicyAPIGatewayID = "00000000-0000-0000-0000-000000000008" - ACLTemplatedPolicyNomadClientID = "00000000-0000-0000-0000-000000000009" - - ACLTemplatedPolicyServiceDescription = "Gives the token or role permissions to register a service and discover services in the Consul catalog. It also gives the specified service's sidecar proxy the permission to discover and route traffic to other services." - ACLTemplatedPolicyNodeDescription = "Gives the token or role permissions for a register an agent/node into the catalog. A node is typically a consul agent but can also be a physical server, cloud instance or a container." - ACLTemplatedPolicyDNSDescription = "Gives the token or role permissions for the Consul DNS to query services in the network." - ACLTemplatedPolicyNomadServerDescription = "Gives the token or role permissions required for integration with a nomad server." - ACLTemplatedPolicyWorkloadIdentityDescription = "Gives the token or role permissions for a specific workload identity." - ACLTemplatedPolicyAPIGatewayDescription = "Gives the token or role permissions for a Consul api gateway" - ACLTemplatedPolicyNomadClientDescription = "Gives the token or role permissions required for integration with a nomad client." - - ACLTemplatedPolicyNoRequiredVariablesSchema = "" // catch-all schema for all templated policy that don't require a schema -) - -// ACLTemplatedPolicyBase contains basic information about builtin templated policies -// template name, id, template code and schema -type ACLTemplatedPolicyBase struct { - TemplateName string - TemplateID string - Schema string - Template string - Description string -} - -var ( - // Note: when adding a new builtin template, ensure you update `command/acl/templatedpolicy/formatter.go` - // to handle the new templates required variables and schema. 
- aclTemplatedPoliciesList = map[string]*ACLTemplatedPolicyBase{ - api.ACLTemplatedPolicyServiceName: { - TemplateID: ACLTemplatedPolicyServiceID, - TemplateName: api.ACLTemplatedPolicyServiceName, - Schema: ACLTemplatedPolicyServiceSchema, - Template: ACLTemplatedPolicyService, - Description: ACLTemplatedPolicyServiceDescription, - }, - api.ACLTemplatedPolicyNodeName: { - TemplateID: ACLTemplatedPolicyNodeID, - TemplateName: api.ACLTemplatedPolicyNodeName, - Schema: ACLTemplatedPolicyNodeSchema, - Template: ACLTemplatedPolicyNode, - Description: ACLTemplatedPolicyNodeDescription, - }, - api.ACLTemplatedPolicyDNSName: { - TemplateID: ACLTemplatedPolicyDNSID, - TemplateName: api.ACLTemplatedPolicyDNSName, - Schema: ACLTemplatedPolicyNoRequiredVariablesSchema, - Template: ACLTemplatedPolicyDNS, - Description: ACLTemplatedPolicyDNSDescription, - }, - api.ACLTemplatedPolicyNomadServerName: { - TemplateID: ACLTemplatedPolicyNomadServerID, - TemplateName: api.ACLTemplatedPolicyNomadServerName, - Schema: ACLTemplatedPolicyNoRequiredVariablesSchema, - Template: ACLTemplatedPolicyNomadServer, - Description: ACLTemplatedPolicyNomadServerDescription, - }, - api.ACLTemplatedPolicyWorkloadIdentityName: { - TemplateID: ACLTemplatedPolicyWorkloadIdentityID, - TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName, - Schema: ACLTemplatedPolicyWorkloadIdentitySchema, - Template: ACLTemplatedPolicyWorkloadIdentity, - Description: ACLTemplatedPolicyWorkloadIdentityDescription, - }, - api.ACLTemplatedPolicyAPIGatewayName: { - TemplateID: ACLTemplatedPolicyAPIGatewayID, - TemplateName: api.ACLTemplatedPolicyAPIGatewayName, - Schema: ACLTemplatedPolicyAPIGatewaySchema, - Template: ACLTemplatedPolicyAPIGateway, - Description: ACLTemplatedPolicyAPIGatewayDescription, - }, - api.ACLTemplatedPolicyNomadClientName: { - TemplateID: ACLTemplatedPolicyNomadClientID, - TemplateName: api.ACLTemplatedPolicyNomadClientName, - Schema: ACLTemplatedPolicyNoRequiredVariablesSchema, - Template: ACLTemplatedPolicyNomadClient, - Description: ACLTemplatedPolicyNomadClientDescription, - }, - } -) - -// ACLTemplatedPolicy represents a template used to generate a `synthetic` policy -// given some input variables. -type ACLTemplatedPolicy struct { - // TemplateID are hidden from all displays and should not be exposed to the users. - TemplateID string `json:",omitempty"` - - // TemplateName is used for display purposes mostly and should not be used for policy rendering. - TemplateName string `json:",omitempty"` - - // TemplateVariables are input variables required to render templated policies. - TemplateVariables *ACLTemplatedPolicyVariables `json:",omitempty"` - - // Datacenters that the synthetic policy will be valid within. - // - No wildcards allowed - // - If empty then the synthetic policy is valid within all datacenters - // - // This is kept for legacy reasons to enable us to replace Node/Service Identities by templated policies. - // - // Only valid for global tokens. It is an error to specify this for local tokens. - Datacenters []string `json:",omitempty"` -} - -// ACLTemplatedPolicyVariables are input variables required to render templated policies. 
-type ACLTemplatedPolicyVariables struct { - Name string `json:"name,omitempty"` -} - -func (tp *ACLTemplatedPolicy) Clone() *ACLTemplatedPolicy { - tp2 := *tp - - tp2.TemplateVariables = nil - if tp.TemplateVariables != nil { - tp2.TemplateVariables = tp.TemplateVariables.Clone() - } - tp2.Datacenters = stringslice.CloneStringSlice(tp.Datacenters) - - return &tp2 -} - -func (tp *ACLTemplatedPolicy) AddToHash(h hash.Hash) { - h.Write([]byte(tp.TemplateID)) - h.Write([]byte(tp.TemplateName)) - - if tp.TemplateVariables != nil { - tp.TemplateVariables.AddToHash(h) - } - for _, dc := range tp.Datacenters { - h.Write([]byte(dc)) - } -} - -func (tv *ACLTemplatedPolicyVariables) AddToHash(h hash.Hash) { - h.Write([]byte(tv.Name)) -} - -func (tv *ACLTemplatedPolicyVariables) Clone() *ACLTemplatedPolicyVariables { - tv2 := *tv - return &tv2 -} - -// validates templated policy variables against schema. -func (tp *ACLTemplatedPolicy) ValidateTemplatedPolicy(schema string) error { - if schema == "" { - return nil - } - - loader := gojsonschema.NewStringLoader(schema) - dataloader := gojsonschema.NewGoLoader(tp.TemplateVariables) - res, err := gojsonschema.Validate(loader, dataloader) - if err != nil { - return fmt.Errorf("failed to load json schema for validation %w", err) - } - - // validate service and node identity names - if tp.TemplateVariables != nil { - if tp.TemplateName == api.ACLTemplatedPolicyServiceName && !acl.IsValidServiceIdentityName(tp.TemplateVariables.Name) { - return fmt.Errorf("service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", tp.TemplateVariables.Name) - } - - if tp.TemplateName == api.ACLTemplatedPolicyNodeName && !acl.IsValidNodeIdentityName(tp.TemplateVariables.Name) { - return fmt.Errorf("node identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", tp.TemplateVariables.Name) - } - } - - if res.Valid() { - return nil - } - - var merr *multierror.Error - - for _, resultError := range res.Errors() { - merr = multierror.Append(merr, fmt.Errorf(resultError.Description())) - } - return merr.ErrorOrNil() -} - -func (tp *ACLTemplatedPolicy) EstimateSize() int { - size := len(tp.TemplateName) + len(tp.TemplateID) + tp.TemplateVariables.EstimateSize() - for _, dc := range tp.Datacenters { - size += len(dc) - } - - return size -} - -func (tv *ACLTemplatedPolicyVariables) EstimateSize() int { - return len(tv.Name) -} - -// SyntheticPolicy generates a policy based on templated policies' ID and variables -// -// Given that we validate this string name before persisting, we do not -// have to escape it before doing the following interpolation. 
-func (tp *ACLTemplatedPolicy) SyntheticPolicy(entMeta *acl.EnterpriseMeta) (*ACLPolicy, error) { - rules, err := tp.aclTemplatedPolicyRules(entMeta) - if err != nil { - return nil, err - } - hasher := fnv.New128a() - hashID := fmt.Sprintf("%x", hasher.Sum([]byte(rules))) - - policy := &ACLPolicy{ - Rules: rules, - ID: hashID, - Name: fmt.Sprintf("synthetic-policy-%s", hashID), - Datacenters: tp.Datacenters, - Description: fmt.Sprintf("synthetic policy generated from templated policy: %s", tp.TemplateName), - } - policy.EnterpriseMeta.Merge(entMeta) - policy.SetHash(true) - - return policy, nil -} - -func (tp *ACLTemplatedPolicy) aclTemplatedPolicyRules(entMeta *acl.EnterpriseMeta) (string, error) { - if entMeta == nil { - entMeta = DefaultEnterpriseMetaInDefaultPartition() - } - entMeta.Normalize() - - tpl := template.New(tp.TemplateName) - tmplCode, ok := aclTemplatedPoliciesList[tp.TemplateName] - if !ok { - return "", fmt.Errorf("acl templated policy does not exist: %s", tp.TemplateName) - } - - parsedTpl, err := tpl.Parse(tmplCode.Template) - if err != nil { - return "", fmt.Errorf("an error occured when parsing template structs: %w", err) - } - var buf bytes.Buffer - err = parsedTpl.Execute(&buf, struct { - *ACLTemplatedPolicyVariables - Namespace string - Partition string - }{ - Namespace: entMeta.NamespaceOrDefault(), - Partition: entMeta.PartitionOrDefault(), - ACLTemplatedPolicyVariables: tp.TemplateVariables, - }) - if err != nil { - return "", fmt.Errorf("an error occured when executing on templated policy variables: %w", err) - } - - return buf.String(), nil -} - -// Deduplicate returns a new list of templated policies without duplicates. -// compares values of template variables to ensure no duplicates -func (tps ACLTemplatedPolicies) Deduplicate() ACLTemplatedPolicies { - list := make(map[string][]ACLTemplatedPolicyVariables) - var out ACLTemplatedPolicies - - for _, tp := range tps { - // checks if template name already in the unique list - _, found := list[tp.TemplateName] - if !found { - list[tp.TemplateName] = make([]ACLTemplatedPolicyVariables, 0) - } - templateSchema := aclTemplatedPoliciesList[tp.TemplateName].Schema - - // if schema is empty, template does not require variables - if templateSchema == "" { - if !found { - out = append(out, tp) - } - continue - } - - if !slices.Contains(list[tp.TemplateName], *tp.TemplateVariables) { - list[tp.TemplateName] = append(list[tp.TemplateName], *tp.TemplateVariables) - out = append(out, tp) - } - } - - return out -} - -func GetACLTemplatedPolicyBase(templateName string) (*ACLTemplatedPolicyBase, bool) { - if orig, found := aclTemplatedPoliciesList[templateName]; found { - copy := *orig - return &copy, found - } - - return nil, false -} - -// GetACLTemplatedPolicyList returns a copy of the list of templated policies -func GetACLTemplatedPolicyList() map[string]*ACLTemplatedPolicyBase { - m := make(map[string]*ACLTemplatedPolicyBase, len(aclTemplatedPoliciesList)) - for k, v := range aclTemplatedPoliciesList { - m[k] = v - } - - return m -} diff --git a/agent/structs/acl_templated_policy_ce.go b/agent/structs/acl_templated_policy_ce.go deleted file mode 100644 index 23e656f0fb17e..0000000000000 --- a/agent/structs/acl_templated_policy_ce.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package structs - -import _ "embed" - -//go:embed acltemplatedpolicy/policies/ce/service.hcl -var ACLTemplatedPolicyService string - -//go:embed acltemplatedpolicy/policies/ce/node.hcl -var ACLTemplatedPolicyNode string - -//go:embed acltemplatedpolicy/policies/ce/dns.hcl -var ACLTemplatedPolicyDNS string - -//go:embed acltemplatedpolicy/policies/ce/nomad-server.hcl -var ACLTemplatedPolicyNomadServer string - -//go:embed acltemplatedpolicy/policies/ce/workload-identity.hcl -var ACLTemplatedPolicyWorkloadIdentity string - -//go:embed acltemplatedpolicy/policies/ce/api-gateway.hcl -var ACLTemplatedPolicyAPIGateway string - -//go:embed acltemplatedpolicy/policies/ce/nomad-client.hcl -var ACLTemplatedPolicyNomadClient string - -func (t *ACLToken) TemplatedPolicyList() []*ACLTemplatedPolicy { - if len(t.TemplatedPolicies) == 0 { - return nil - } - - out := make([]*ACLTemplatedPolicy, 0, len(t.TemplatedPolicies)) - for _, n := range t.TemplatedPolicies { - out = append(out, n.Clone()) - } - return out -} - -func (t *ACLRole) TemplatedPolicyList() []*ACLTemplatedPolicy { - if len(t.TemplatedPolicies) == 0 { - return nil - } - - out := make([]*ACLTemplatedPolicy, 0, len(t.TemplatedPolicies)) - for _, n := range t.TemplatedPolicies { - out = append(out, n.Clone()) - } - return out -} diff --git a/agent/structs/acl_templated_policy_ce_test.go b/agent/structs/acl_templated_policy_ce_test.go deleted file mode 100644 index f21292806283a..0000000000000 --- a/agent/structs/acl_templated_policy_ce_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package structs - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/api" -) - -func TestStructs_ACLTemplatedPolicy_SyntheticPolicy(t *testing.T) { - type testCase struct { - templatedPolicy *ACLTemplatedPolicy - expectedPolicy *ACLPolicy - } - - testCases := map[string]testCase{ - "service-identity-template": { - templatedPolicy: &ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyServiceID, - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - expectedPolicy: &ACLPolicy{ - Description: "synthetic policy generated from templated policy: builtin/service", - Rules: ` -service "api" { - policy = "write" -} -service "api-sidecar-proxy" { - policy = "write" -} -service_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "read" -}`, - }, - }, - "node-identity-template": { - templatedPolicy: &ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyNodeID, - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "web", - }, - }, - expectedPolicy: &ACLPolicy{ - Description: "synthetic policy generated from templated policy: builtin/node", - Rules: ` -node "web" { - policy = "write" -} -service_prefix "" { - policy = "read" -}`, - }, - }, - "dns-template": { - templatedPolicy: &ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyDNSID, - TemplateName: api.ACLTemplatedPolicyDNSName, - }, - expectedPolicy: &ACLPolicy{ - Description: "synthetic policy generated from templated policy: builtin/dns", - Rules: ` -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "read" -} -query_prefix "" { - policy = "read" -}`, - }, - }, - "workload-identity-template": { - templatedPolicy: &ACLTemplatedPolicy{ - TemplateID: 
ACLTemplatedPolicyWorkloadIdentityID, - TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - expectedPolicy: &ACLPolicy{ - Description: "synthetic policy generated from templated policy: builtin/workload-identity", - Rules: `identity "api" { - policy = "write" -}`, - }, - }, - "api-gateway-template": { - templatedPolicy: &ACLTemplatedPolicy{ - TemplateID: ACLTemplatedPolicyAPIGatewayID, - TemplateName: api.ACLTemplatedPolicyAPIGatewayName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api-gateway", - }, - }, - expectedPolicy: &ACLPolicy{ - Description: "synthetic policy generated from templated policy: builtin/api-gateway", - Rules: `mesh = "read" -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "read" -} -service "api-gateway" { - policy = "write" -}`, - }, - }, - } - - for name, tcase := range testCases { - t.Run(name, func(t *testing.T) { - policy, err := tcase.templatedPolicy.SyntheticPolicy(nil) - - require.NoError(t, err) - require.Equal(t, tcase.expectedPolicy.Description, policy.Description) - require.Equal(t, tcase.expectedPolicy.Rules, policy.Rules) - require.Contains(t, policy.Name, "synthetic-policy-") - require.NotEmpty(t, policy.Hash) - require.NotEmpty(t, policy.ID) - }) - } -} diff --git a/agent/structs/acl_templated_policy_test.go b/agent/structs/acl_templated_policy_test.go deleted file mode 100644 index 5d907ca010bfd..0000000000000 --- a/agent/structs/acl_templated_policy_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "testing" - - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" -) - -func TestDeduplicate(t *testing.T) { - type testCase struct { - templatedPolicies ACLTemplatedPolicies - expectedCount int - } - tcases := map[string]testCase{ - "multiple-of-the-same-template": { - templatedPolicies: ACLTemplatedPolicies{ - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - }, - expectedCount: 1, - }, - "separate-templates-with-matching-variables": { - templatedPolicies: ACLTemplatedPolicies{ - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - }, - expectedCount: 2, - }, - "separate-templates-with-multiple-matching-variables": { - templatedPolicies: ACLTemplatedPolicies{ - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyNodeName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "web", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "api", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyDNSName, - }, - 
&ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyServiceName, - TemplateVariables: &ACLTemplatedPolicyVariables{ - Name: "web", - }, - }, - &ACLTemplatedPolicy{ - TemplateName: api.ACLTemplatedPolicyDNSName, - }, - }, - expectedCount: 5, - }, - } - - for name, tcase := range tcases { - t.Run(name, func(t *testing.T) { - policies := tcase.templatedPolicies.Deduplicate() - - require.Equal(t, tcase.expectedCount, len(policies)) - }) - } -} diff --git a/agent/structs/acl_test.go b/agent/structs/acl_test.go index e1fb35263b95a..6658b8335021a 100644 --- a/agent/structs/acl_test.go +++ b/agent/structs/acl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/aclfilter/filter.go b/agent/structs/aclfilter/filter.go index d59bf3c9c403b..ddd63db10e686 100644 --- a/agent/structs/aclfilter/filter.go +++ b/agent/structs/aclfilter/filter.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package aclfilter diff --git a/agent/structs/aclfilter/filter_test.go b/agent/structs/aclfilter/filter_test.go index 98f3bb63f291b..2339b0acd223d 100644 --- a/agent/structs/aclfilter/filter_test.go +++ b/agent/structs/aclfilter/filter_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package aclfilter diff --git a/agent/structs/acltemplatedpolicy/policies/ce/api-gateway.hcl b/agent/structs/acltemplatedpolicy/policies/ce/api-gateway.hcl deleted file mode 100644 index 7bb2e49081302..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/api-gateway.hcl +++ /dev/null @@ -1,10 +0,0 @@ -mesh = "read" -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "read" -} -service "{{.Name}}" { - policy = "write" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/dns.hcl b/agent/structs/acltemplatedpolicy/policies/ce/dns.hcl deleted file mode 100644 index 6627f1c96a5ff..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/dns.hcl +++ /dev/null @@ -1,10 +0,0 @@ - -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "read" -} -query_prefix "" { - policy = "read" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/node.hcl b/agent/structs/acltemplatedpolicy/policies/ce/node.hcl deleted file mode 100644 index b6b03a2250f02..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/node.hcl +++ /dev/null @@ -1,7 +0,0 @@ - -node "{{.Name}}" { - policy = "write" -} -service_prefix "" { - policy = "read" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/nomad-client.hcl b/agent/structs/acltemplatedpolicy/policies/ce/nomad-client.hcl deleted file mode 100644 index 4ea9f1e6df51e..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/nomad-client.hcl +++ /dev/null @@ -1,12 +0,0 @@ -agent_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "write" -} -key_prefix "" { - policy = "read" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/nomad-server.hcl b/agent/structs/acltemplatedpolicy/policies/ce/nomad-server.hcl deleted file mode 100644 index 7030ff771a199..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/nomad-server.hcl +++ /dev/null @@ -1,11 +0,0 @@ - -acl = "write" 
-agent_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "read" -} -service_prefix "" { - policy = "write" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/service.hcl b/agent/structs/acltemplatedpolicy/policies/ce/service.hcl deleted file mode 100644 index a8d2faf2791d1..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/service.hcl +++ /dev/null @@ -1,13 +0,0 @@ - -service "{{.Name}}" { - policy = "write" -} -service "{{.Name}}-sidecar-proxy" { - policy = "write" -} -service_prefix "" { - policy = "read" -} -node_prefix "" { - policy = "read" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl b/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl deleted file mode 100644 index ccd1e0564633d..0000000000000 --- a/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl +++ /dev/null @@ -1,3 +0,0 @@ -identity "{{.Name}}" { - policy = "write" -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/schemas/api-gateway.json b/agent/structs/acltemplatedpolicy/schemas/api-gateway.json deleted file mode 100644 index 8a3d193268217..0000000000000 --- a/agent/structs/acltemplatedpolicy/schemas/api-gateway.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "type": "object", - "properties": { - "name": { "type": "string", "$ref": "#/definitions/min-length-one" } - }, - "required": ["name"], - "definitions": { - "min-length-one": { - "type": "string", - "minLength": 1 - } - } -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/schemas/node.json b/agent/structs/acltemplatedpolicy/schemas/node.json deleted file mode 100644 index 8a3d193268217..0000000000000 --- a/agent/structs/acltemplatedpolicy/schemas/node.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "type": "object", - "properties": { - "name": { "type": "string", "$ref": "#/definitions/min-length-one" } - }, - "required": ["name"], - "definitions": { - "min-length-one": { - "type": "string", - "minLength": 1 - } - } -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/schemas/service.json b/agent/structs/acltemplatedpolicy/schemas/service.json deleted file mode 100644 index 8a3d193268217..0000000000000 --- a/agent/structs/acltemplatedpolicy/schemas/service.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "type": "object", - "properties": { - "name": { "type": "string", "$ref": "#/definitions/min-length-one" } - }, - "required": ["name"], - "definitions": { - "min-length-one": { - "type": "string", - "minLength": 1 - } - } -} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/schemas/workload-identity.json b/agent/structs/acltemplatedpolicy/schemas/workload-identity.json deleted file mode 100644 index 31064f36af7f0..0000000000000 --- a/agent/structs/acltemplatedpolicy/schemas/workload-identity.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "type": "object", - "properties": { - "name": { "type": "string", "$ref": "#/definitions/min-length-one" } - }, - "required": ["name"], - "definitions": { - "min-length-one": { - "type": "string", - "minLength": 1 - } - } -} \ No newline at end of file diff --git a/agent/structs/auto_encrypt.go b/agent/structs/auto_encrypt.go index cce7c4effa1e3..2e9053f9a5382 100644 --- a/agent/structs/auto_encrypt.go +++ b/agent/structs/auto_encrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/autopilot.go b/agent/structs/autopilot.go index 431e2ad4d374f..a5a14684fa9e3 100644 --- a/agent/structs/autopilot.go +++ b/agent/structs/autopilot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/autopilot_ce.go b/agent/structs/autopilot_ce.go index f15e28f6a897e..3098c0cf3cae6 100644 --- a/agent/structs/autopilot_ce.go +++ b/agent/structs/autopilot_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index 84795ce47898d..f11af9f87801b 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/catalog_ce.go b/agent/structs/catalog_ce.go index 463f88fcbe2e4..91e08264b99d0 100644 --- a/agent/structs/catalog_ce.go +++ b/agent/structs/catalog_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/check_definition.go b/agent/structs/check_definition.go index 0b04290e707d5..f28201b4d17f5 100644 --- a/agent/structs/check_definition.go +++ b/agent/structs/check_definition.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/check_definition_test.go b/agent/structs/check_definition_test.go index 5a51f377c0e8f..676499ef042a1 100644 --- a/agent/structs/check_definition_test.go +++ b/agent/structs/check_definition_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/check_type.go b/agent/structs/check_type.go index 61a5a762f7c54..e6342e8231f93 100644 --- a/agent/structs/check_type.go +++ b/agent/structs/check_type.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 04393b2761d23..43c6d6d953551 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -175,7 +175,6 @@ type ServiceConfigEntry struct { LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` BalanceInboundConnections string `json:",omitempty" alias:"balance_inbound_connections"` - RateLimits *RateLimits `json:",omitempty" alias:"rate_limits"` EnvoyExtensions EnvoyExtensions `json:",omitempty" alias:"envoy_extensions"` Meta map[string]string `json:",omitempty"` @@ -311,10 +310,6 @@ func (e *ServiceConfigEntry) Validate() error { } } - if err := validateRatelimit(e.RateLimits); err != nil { - validationErr = multierror.Append(validationErr, err) - } - if err := envoyextensions.ValidateExtensions(e.EnvoyExtensions.ToAPI()); err != nil { validationErr = multierror.Append(validationErr, err) } @@ -411,51 +406,14 @@ type DestinationConfig struct { Port int `json:",omitempty"` } -func IsIP(address string) bool { +func IsHostname(address string) bool { ip := net.ParseIP(address) - return ip != nil + return ip == nil } -// RateLimits is rate limiting configuration that is applied to -// inbound traffic for a service. -// Rate limiting is a Consul enterprise feature. -type RateLimits struct { - InstanceLevel InstanceLevelRateLimits `alias:"instance_level"` -} - -// InstanceLevelRateLimits represents rate limit configuration -// that are applied per service instance. -type InstanceLevelRateLimits struct { - // RequestsPerSecond is the average number of requests per second that can be - // made without being throttled. This field is required if RequestsMaxBurst - // is set. The allowed number of requests may exceed RequestsPerSecond up to - // the value specified in RequestsMaxBurst. - // - // Internally, this is the refill rate of the token bucket used for rate limiting. - RequestsPerSecond int `alias:"requests_per_second"` - - // RequestsMaxBurst is the maximum number of requests that can be sent - // in a burst. Should be equal to or greater than RequestsPerSecond. - // If unset, defaults to RequestsPerSecond. - // - // Internally, this is the maximum size of the token bucket used for rate limiting. - RequestsMaxBurst int `alias:"requests_max_burst"` - - // Routes is a list of rate limits applied to specific routes. - // For a given request, the first matching route will be applied, if any. - // Overrides any top-level configuration. - Routes []InstanceLevelRouteRateLimits -} - -// InstanceLevelRouteRateLimits represents rate limit configuration -// applied to a route matching one of PathExact/PathPrefix/PathRegex. -type InstanceLevelRouteRateLimits struct { - PathExact string `alias:"path_exact"` - PathPrefix string `alias:"path_prefix"` - PathRegex string `alias:"path_regex"` - - RequestsPerSecond int `alias:"requests_per_second"` - RequestsMaxBurst int `alias:"requests_max_burst"` +func IsIP(address string) bool { + ip := net.ParseIP(address) + return ip != nil } // ProxyConfigEntry is the top-level struct for global proxy configuration defaults. @@ -676,7 +634,7 @@ func (e *ProxyConfigEntry) UnmarshalBinary(data []byte) error { // into a concrete type. 
// // There is an 'api' variation of this in -// command/helpers/helpers.go:newDecodeConfigEntry +// command/config/write/config_write.go:newDecodeConfigEntry func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { var entry ConfigEntry @@ -928,6 +886,19 @@ func (s *ServiceConfigRequest) RequestDatacenter() string { return s.Datacenter } +// GetLocalUpstreamIDs returns the list of non-peer service ids for upstreams defined on this request. +// This is often used for fetching service-defaults config entries. +func (s *ServiceConfigRequest) GetLocalUpstreamIDs() []ServiceID { + var upstreams []ServiceID + for i := range s.UpstreamServiceNames { + u := &s.UpstreamServiceNames[i] + if u.Peer == "" { + upstreams = append(upstreams, u.ServiceName.ToServiceID()) + } + } + return upstreams +} + func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ Token: r.Token, @@ -1307,7 +1278,6 @@ type ServiceConfigResponse struct { Mode ProxyMode `json:",omitempty"` Destination DestinationConfig `json:",omitempty"` AccessLogs AccessLogsConfig `json:",omitempty"` - RateLimits RateLimits `json:",omitempty"` Meta map[string]string `json:",omitempty"` EnvoyExtensions []EnvoyExtension `json:",omitempty"` QueryMeta diff --git a/agent/structs/config_entry_apigw_jwt_ce.go b/agent/structs/config_entry_apigw_jwt_ce.go deleted file mode 100644 index 61b9db5b16ab9..0000000000000 --- a/agent/structs/config_entry_apigw_jwt_ce.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package structs - -// APIGatewayJWTRequirement holds the list of JWT providers to be verified against -type APIGatewayJWTRequirement struct{} - -// JWTFilter holds the JWT Filter configuration for an HTTPRoute -type JWTFilter struct{} diff --git a/agent/structs/config_entry_ce.go b/agent/structs/config_entry_ce.go index d8ca4968ad53f..2977075bff1b7 100644 --- a/agent/structs/config_entry_ce.go +++ b/agent/structs/config_entry_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs @@ -29,8 +30,6 @@ func validateUnusedKeys(unused []string) error { // to exist on the target. case strings.HasSuffix(strings.ToLower(k), "namespace"): err = multierror.Append(err, fmt.Errorf("invalid config key %q, namespaces are a consul enterprise feature", k)) - case strings.Contains(strings.ToLower(k), "jwt"): - err = multierror.Append(err, fmt.Errorf("invalid config key %q, api-gateway jwt validation is a consul enterprise feature", k)) default: err = multierror.Append(err, fmt.Errorf("invalid config key %q", k)) } @@ -52,25 +51,3 @@ func validateExportedServicesName(name string) error { func makeEnterpriseConfigEntry(kind, name string) ConfigEntry { return nil } - -func validateRatelimit(rl *RateLimits) error { - if rl != nil { - return fmt.Errorf("invalid rate_limits config. Rate limiting is a consul enterprise feature") - } - return nil -} - -func (rl RateLimits) ToEnvoyExtension() *EnvoyExtension { return nil } - -// GetLocalUpstreamIDs returns the list of non-peer service ids for upstreams defined on this request. -// This is often used for fetching service-defaults config entries. 
-func (s *ServiceConfigRequest) GetLocalUpstreamIDs() []ServiceID { - var upstreams []ServiceID - for _, u := range s.UpstreamServiceNames { - if u.Peer != "" { - continue - } - upstreams = append(upstreams, u.ServiceName.ToServiceID()) - } - return upstreams -} diff --git a/agent/structs/config_entry_ce_test.go b/agent/structs/config_entry_ce_test.go index 52ee3240afafe..4561f4aae3587 100644 --- a/agent/structs/config_entry_ce_test.go +++ b/agent/structs/config_entry_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs @@ -101,60 +102,3 @@ func TestDecodeConfigEntry_CE(t *testing.T) { }) } } - -func Test_GetLocalUpstreamIDs(t *testing.T) { - cases := map[string]struct { - input *ServiceConfigRequest - expect []ServiceID - }{ - "no_upstreams": { - input: &ServiceConfigRequest{ - Name: "svc", - }, - expect: nil, - }, - "upstreams": { - input: &ServiceConfigRequest{ - Name: "svc", - UpstreamServiceNames: []PeeredServiceName{ - {ServiceName: NewServiceName("a", nil)}, - {ServiceName: NewServiceName("b", nil)}, - {ServiceName: NewServiceName("c", nil)}, - }, - }, - expect: []ServiceID{ - {ID: "a"}, - {ID: "b"}, - {ID: "c"}, - }, - }, - "peer_upstream": { - input: &ServiceConfigRequest{ - Name: "svc", - UpstreamServiceNames: []PeeredServiceName{ - {Peer: "p", ServiceName: NewServiceName("a", nil)}, - }, - }, - expect: nil, - }, - "mixed_upstreams": { - input: &ServiceConfigRequest{ - Name: "svc", - UpstreamServiceNames: []PeeredServiceName{ - {ServiceName: NewServiceName("a", nil)}, - {Peer: "p", ServiceName: NewServiceName("b", nil)}, - {ServiceName: NewServiceName("c", nil)}, - }, - }, - expect: []ServiceID{ - {ID: "a"}, - {ID: "c"}, - }, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - require.Equal(t, tc.expect, tc.input.GetLocalUpstreamIDs()) - }) - } -} diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 8b43c4f9243a1..b7bbad33533bb 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -372,10 +372,9 @@ func (m *ServiceRouteMatch) IsEmpty() bool { // ServiceRouteHTTPMatch is a set of http-specific match criteria. 
type ServiceRouteHTTPMatch struct { - PathExact string `json:",omitempty" alias:"path_exact"` - PathPrefix string `json:",omitempty" alias:"path_prefix"` - PathRegex string `json:",omitempty" alias:"path_regex"` - CaseInsensitive bool `json:",omitempty" alias:"case_insensitive"` + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"` @@ -386,7 +385,6 @@ func (m *ServiceRouteHTTPMatch) IsEmpty() bool { return m.PathExact == "" && m.PathPrefix == "" && m.PathRegex == "" && - !m.CaseInsensitive && len(m.Header) == 0 && len(m.QueryParam) == 0 && len(m.Methods) == 0 diff --git a/agent/structs/config_entry_discoverychain_ce.go b/agent/structs/config_entry_discoverychain_ce.go index d7333616d36e1..87c22263794e8 100644 --- a/agent/structs/config_entry_discoverychain_ce.go +++ b/agent/structs/config_entry_discoverychain_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_discoverychain_ce_test.go b/agent/structs/config_entry_discoverychain_ce_test.go index b5053774a2ba2..2edcce2bedd8f 100644 --- a/agent/structs/config_entry_discoverychain_ce_test.go +++ b/agent/structs/config_entry_discoverychain_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_discoverychain_test.go b/agent/structs/config_entry_discoverychain_test.go index 2403c62c8f180..57607dbd4c8a8 100644 --- a/agent/structs/config_entry_discoverychain_test.go +++ b/agent/structs/config_entry_discoverychain_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -2742,20 +2742,6 @@ func TestServiceRouterConfigEntry(t *testing.T) { }), validateErr: "contains an invalid retry condition: \"invalid-retry-condition\"", }, - //////////////// - { - name: "default route with case insensitive match", - entry: makerouter(routeMatch(httpMatch(&ServiceRouteHTTPMatch{ - CaseInsensitive: true, - }))), - }, - { - name: "route with path prefix and case insensitive match /apI", - entry: makerouter(routeMatch(httpMatch(&ServiceRouteHTTPMatch{ - PathPrefix: "/apI", - CaseInsensitive: true, - }))), - }, } for _, tc := range cases { diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index cabdd6d7cb8b9..ebe1486f119b8 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_exports_ce.go b/agent/structs/config_entry_exports_ce.go index bf69a2d8d78ba..9f9bb1cc0825e 100644 --- a/agent/structs/config_entry_exports_ce.go +++ b/agent/structs/config_entry_exports_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_exports_ce_test.go b/agent/structs/config_entry_exports_ce_test.go index fa66dd7ead329..671654b3b2a4a 100644 --- a/agent/structs/config_entry_exports_ce_test.go +++ b/agent/structs/config_entry_exports_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_exports_test.go b/agent/structs/config_entry_exports_test.go index 62e5586a0dac2..7905b46009b79 100644 --- a/agent/structs/config_entry_exports_test.go +++ b/agent/structs/config_entry_exports_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index e3ccfbbb35d3e..9dec2ba95bba5 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -932,17 +932,6 @@ type APIGatewayListener struct { Protocol APIGatewayListenerProtocol // TLS is the TLS settings for the listener. TLS APIGatewayTLSConfiguration - - // Override is the policy that overrides all other policy and route specific configuration - Override *APIGatewayPolicy `json:",omitempty"` - // Default is the policy that is the default for the listener and route, routes can override this behavior - Default *APIGatewayPolicy `json:",omitempty"` -} - -// APIGatewayPolicy holds the policy that configures the gateway listener, this is used in the `Override` and `Default` fields of a listener -type APIGatewayPolicy struct { - // JWT holds the JWT configuration for the Listener - JWT *APIGatewayJWTRequirement `json:",omitempty"` } func (l APIGatewayListener) GetHostname() string { diff --git a/agent/structs/config_entry_gateways_test.go b/agent/structs/config_entry_gateways_test.go index 16e8e2cbf50bf..5a14a3262462a 100644 --- a/agent/structs/config_entry_gateways_test.go +++ b/agent/structs/config_entry_gateways_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_inline_certificate.go b/agent/structs/config_entry_inline_certificate.go index a13b1d720a24f..e047cd3e70282 100644 --- a/agent/structs/config_entry_inline_certificate.go +++ b/agent/structs/config_entry_inline_certificate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -74,7 +74,6 @@ func (e *InlineCertificateConfigEntry) Validate() error { if privateKeyBlock == nil { return errors.New("failed to parse private key PEM") } - err = validateKeyLength(privateKeyBlock) if err != nil { return err diff --git a/agent/structs/config_entry_inline_certificate_test.go b/agent/structs/config_entry_inline_certificate_test.go index 3c537c059161b..b95f3b0e9694d 100644 --- a/agent/structs/config_entry_inline_certificate_test.go +++ b/agent/structs/config_entry_inline_certificate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_intentions.go b/agent/structs/config_entry_intentions.go index d06a260a59d8e..ba7178bb51f94 100644 --- a/agent/structs/config_entry_intentions.go +++ b/agent/structs/config_entry_intentions.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_intentions_ce.go b/agent/structs/config_entry_intentions_ce.go index 83a09765def37..3c97b55aac56a 100644 --- a/agent/structs/config_entry_intentions_ce.go +++ b/agent/structs/config_entry_intentions_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_intentions_ce_test.go b/agent/structs/config_entry_intentions_ce_test.go index 2a5f093be70d6..23d4ded4d2617 100644 --- a/agent/structs/config_entry_intentions_ce_test.go +++ b/agent/structs/config_entry_intentions_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_intentions_test.go b/agent/structs/config_entry_intentions_test.go index ea8703de05d86..56c04bb21e3cf 100644 --- a/agent/structs/config_entry_intentions_test.go +++ b/agent/structs/config_entry_intentions_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_jwt_provider.go b/agent/structs/config_entry_jwt_provider.go index 5a1944cb4660a..752bf62643c77 100644 --- a/agent/structs/config_entry_jwt_provider.go +++ b/agent/structs/config_entry_jwt_provider.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_jwt_provider_ce.go b/agent/structs/config_entry_jwt_provider_ce.go index 4ec86f8ce04c3..533f349c01e52 100644 --- a/agent/structs/config_entry_jwt_provider_ce.go +++ b/agent/structs/config_entry_jwt_provider_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_jwt_provider_test.go b/agent/structs/config_entry_jwt_provider_test.go index 6a117fe5084f4..a63507663ce45 100644 --- a/agent/structs/config_entry_jwt_provider_test.go +++ b/agent/structs/config_entry_jwt_provider_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_mesh.go b/agent/structs/config_entry_mesh.go index c16cbf4243754..11baf3517d669 100644 --- a/agent/structs/config_entry_mesh.go +++ b/agent/structs/config_entry_mesh.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_mesh_ce.go b/agent/structs/config_entry_mesh_ce.go index 74c646361a465..1612d65682730 100644 --- a/agent/structs/config_entry_mesh_ce.go +++ b/agent/structs/config_entry_mesh_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_mesh_test.go b/agent/structs/config_entry_mesh_test.go index f6eaea9e9c546..6bdfaa15cae80 100644 --- a/agent/structs/config_entry_mesh_test.go +++ b/agent/structs/config_entry_mesh_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_routes.go b/agent/structs/config_entry_routes.go index 7b5ac179aab0c..6e9eff36ba332 100644 --- a/agent/structs/config_entry_routes.go +++ b/agent/structs/config_entry_routes.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -7,7 +7,6 @@ import ( "errors" "fmt" "strings" - "time" "github.com/miekg/dns" @@ -358,38 +357,6 @@ type HTTPMatch struct { Query []HTTPQueryMatch } -func (m HTTPMatch) DeepEqual(other HTTPMatch) bool { - if m.Method != other.Method { - return false - } - - if m.Path != other.Path { - return false - } - - if len(m.Headers) != len(other.Headers) { - return false - } - - if len(m.Query) != len(other.Query) { - return false - } - - for i := 0; i < len(m.Headers); i++ { - if m.Headers[i] != other.Headers[i] { - return false - } - } - - for i := 0; i < len(m.Query); i++ { - if m.Query[i] != other.Query[i] { - return false - } - } - - return true -} - // HTTPMatchMethod specifies which type of HTTP verb should // be used for matching a given request. type HTTPMatchMethod string @@ -465,17 +432,8 @@ type HTTPQueryMatch struct { // HTTPFilters specifies a list of filters used to modify a request // before it is routed to an upstream. type HTTPFilters struct { - Headers []HTTPHeaderFilter - URLRewrite *URLRewrite - RetryFilter *RetryFilter - TimeoutFilter *TimeoutFilter - JWT *JWTFilter -} - -// HTTPResponseFilters specifies a list of filters used to modify the -// response returned by an upstream -type HTTPResponseFilters struct { - Headers []HTTPHeaderFilter + Headers []HTTPHeaderFilter + URLRewrite *URLRewrite } // HTTPHeaderFilter specifies how HTTP headers should be modified. @@ -489,27 +447,12 @@ type URLRewrite struct { Path string } -type RetryFilter struct { - NumRetries uint32 - RetryOn []string - RetryOnStatusCodes []uint32 - RetryOnConnectFailure bool -} - -type TimeoutFilter struct { - RequestTimeout time.Duration - IdleTimeout time.Duration -} - // HTTPRouteRule specifies the routing rules used to determine what upstream // service an HTTP request is routed to. type HTTPRouteRule struct { // Filters is a list of HTTP-based filters used to modify a request prior // to routing it to the upstream service Filters HTTPFilters - // ResponseFilters is a list of HTTP-based filters used to modify a response - // returned by the upstream service - ResponseFilters HTTPResponseFilters // Matches specified the matching criteria used in the routing table. If a // request matches the given HTTPMatch configuration, then traffic is routed // to services specified in the Services field. 
@@ -529,10 +472,6 @@ type HTTPService struct { // to routing it to the upstream service Filters HTTPFilters - // ResponseFilters is a list of HTTP-based filters used to modify the - // response returned from the upstream service - ResponseFilters HTTPResponseFilters - acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } diff --git a/agent/structs/config_entry_routes_test.go b/agent/structs/config_entry_routes_test.go index 2aac0051a3263..476ce46eed05d 100644 --- a/agent/structs/config_entry_routes_test.go +++ b/agent/structs/config_entry_routes_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -437,476 +437,3 @@ func TestHTTPRoute(t *testing.T) { } testConfigEntryNormalizeAndValidate(t, cases) } - -func TestHTTPMatch_DeepEqual(t *testing.T) { - type fields struct { - Headers []HTTPHeaderMatch - Method HTTPMatchMethod - Path HTTPPathMatch - Query []HTTPQueryMatch - } - type args struct { - other HTTPMatch - } - tests := map[string]struct { - match HTTPMatch - other HTTPMatch - want bool - }{ - "all fields equal": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: true, - }, - "differing number of header matches": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - "differing header matches": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h4", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", 
- }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - "different path matching": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/zoidberg", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - "differing methods": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodConnect, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - "differing number of query matches": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", 
- }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - "different query matches": { - match: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "another", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - other: HTTPMatch{ - Headers: []HTTPHeaderMatch{ - { - Match: HTTPHeaderMatchExact, - Name: "h1", - Value: "a", - }, - { - Match: HTTPHeaderMatchPrefix, - Name: "h2", - Value: "b", - }, - }, - Method: HTTPMatchMethodGet, - Path: HTTPPathMatch{ - Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), - Value: "/bender", - }, - Query: []HTTPQueryMatch{ - { - Match: HTTPQueryMatchExact, - Name: "q", - Value: "nibbler", - }, - { - Match: HTTPQueryMatchPresent, - Name: "ship", - Value: "planet express", - }, - }, - }, - want: false, - }, - } - for name, tt := range tests { - name := name - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - if got := tt.match.DeepEqual(tt.other); got != tt.want { - t.Errorf("HTTPMatch.DeepEqual() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/agent/structs/config_entry_sameness_group.go b/agent/structs/config_entry_sameness_group.go index 8fefb193537bc..3668ca798e46b 100644 --- a/agent/structs/config_entry_sameness_group.go +++ b/agent/structs/config_entry_sameness_group.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_sameness_group_ce.go b/agent/structs/config_entry_sameness_group_ce.go index c201ac8b429a3..282ab862fa3b3 100644 --- a/agent/structs/config_entry_sameness_group_ce.go +++ b/agent/structs/config_entry_sameness_group_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/config_entry_status.go b/agent/structs/config_entry_status.go index 749ba1596868d..fb91a0add3743 100644 --- a/agent/structs/config_entry_status.go +++ b/agent/structs/config_entry_status.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index e57e2c4041348..a2e64fca31f42 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -889,310 +889,6 @@ func TestDecodeConfigEntry(t *testing.T) { }, }, }, - { - name: "service-router: kitchen sink case insensitive", - snake: ` - kind = "service-router" - name = "main" - meta { - "foo" = "bar" - "gir" = "zim" - } - routes = [ - { - match { - http { - path_exact = "/foo" - case_insensitive = true - header = [ - { - name = "debug1" - present = true - }, - { - name = "debug2" - present = false - invert = true - }, - { - name = "debug3" - exact = "1" - }, - { - name = "debug4" - prefix = "aaa" - }, - { - name = "debug5" - suffix = "bbb" - }, - { - name = "debug6" - regex = "a.*z" - }, - ] - } - } - destination { - service = "carrot" - service_subset = "kale" - namespace = "leek" - prefix_rewrite = "/alternate" - request_timeout = "99s" - idle_timeout = "99s" - num_retries = 12345 - retry_on_connect_failure = true - retry_on_status_codes = [401, 209] - request_headers { - add { - x-foo = "bar" - } - set { - bar = "baz" - } - remove = ["qux"] - } - response_headers { - add { - x-foo = "bar" - } - set { - bar = "baz" - } - remove = ["qux"] - } - } - }, - { - match { - http { - path_prefix = "/foo" - methods = [ "GET", "DELETE" ] - query_param = [ - { - name = "hack1" - present = true - }, - { - name = "hack2" - exact = "1" - }, - { - name = "hack3" - regex = "a.*z" - }, - ] - } - } - }, - { - match { - http { - path_regex = "/foo" - } - } - }, - ] - `, - camel: ` - Kind = "service-router" - Name = "main" - Meta { - "foo" = "bar" - "gir" = "zim" - } - Routes = [ - { - Match { - HTTP { - PathExact = "/foo" - CaseInsensitive = true - Header = [ - { - Name = "debug1" - Present = true - }, - { - Name = "debug2" - Present = false - Invert = true - }, - { - Name = "debug3" - Exact = "1" - }, - { - Name = "debug4" - Prefix = "aaa" - }, - { - Name = "debug5" - Suffix = "bbb" - }, - { - Name = "debug6" - Regex = "a.*z" - }, - ] - } - } - Destination { - Service = "carrot" - ServiceSubset = "kale" - Namespace = "leek" - PrefixRewrite = "/alternate" - RequestTimeout = "99s" - IdleTimeout = "99s" - NumRetries = 12345 - RetryOnConnectFailure = true - RetryOnStatusCodes = [401, 209] - RequestHeaders { - Add { - x-foo = "bar" - } - Set { - bar = "baz" - } - Remove = ["qux"] - } - ResponseHeaders { - Add { - x-foo = "bar" - } - Set { - bar = "baz" - } - Remove = ["qux"] - } - } - }, - { - Match { - HTTP { - PathPrefix = "/foo" - Methods = [ "GET", "DELETE" ] - QueryParam = [ - { - Name = "hack1" - Present = true - }, - { - Name = "hack2" - Exact = "1" - }, - { - Name = "hack3" - Regex = "a.*z" - }, - ] - } - } - }, - { - Match { - HTTP { - PathRegex = "/foo" - } - } - }, - ] - `, - expect: &ServiceRouterConfigEntry{ - Kind: "service-router", - Name: "main", - Meta: map[string]string{ - "foo": "bar", - "gir": "zim", - }, - Routes: []ServiceRoute{ - { - Match: &ServiceRouteMatch{ - HTTP: &ServiceRouteHTTPMatch{ - PathExact: "/foo", - CaseInsensitive: true, - Header: []ServiceRouteHTTPMatchHeader{ - { - Name: "debug1", - Present: true, - }, - { - Name: "debug2", - Present: false, - Invert: true, - }, - { - Name: "debug3", - Exact: "1", - }, - { - Name: "debug4", - Prefix: "aaa", - }, - { - Name: "debug5", - Suffix: "bbb", - }, - { - Name: "debug6", - Regex: "a.*z", - }, - }, - }, - }, - Destination: &ServiceRouteDestination{ - Service: "carrot", - ServiceSubset: "kale", - Namespace: "leek", - PrefixRewrite: "/alternate", - RequestTimeout: 99 * time.Second, - IdleTimeout: 99 * time.Second, - NumRetries: 
12345, - RetryOnConnectFailure: true, - RetryOnStatusCodes: []uint32{401, 209}, - RequestHeaders: &HTTPHeaderModifiers{ - Add: map[string]string{"x-foo": "bar"}, - Set: map[string]string{"bar": "baz"}, - Remove: []string{"qux"}, - }, - ResponseHeaders: &HTTPHeaderModifiers{ - Add: map[string]string{"x-foo": "bar"}, - Set: map[string]string{"bar": "baz"}, - Remove: []string{"qux"}, - }, - }, - }, - { - Match: &ServiceRouteMatch{ - HTTP: &ServiceRouteHTTPMatch{ - PathPrefix: "/foo", - Methods: []string{"GET", "DELETE"}, - QueryParam: []ServiceRouteHTTPMatchQueryParam{ - { - Name: "hack1", - Present: true, - }, - { - Name: "hack2", - Exact: "1", - }, - { - Name: "hack3", - Regex: "a.*z", - }, - }, - }, - }, - }, - { - Match: &ServiceRouteMatch{ - HTTP: &ServiceRouteHTTPMatch{ - PathRegex: "/foo", - }, - }, - }, - }, - }, - }, { name: "service-splitter: kitchen sink", snake: ` diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 4a6033efab57c..227c2eb472702 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 267aeba5e63d9..c8a7cea1df8ae 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -217,11 +217,6 @@ type IssuedCert struct { // PrivateKeyPEM is the PEM encoded private key associated with CertPEM. PrivateKeyPEM string `json:",omitempty"` - // WorkloadIdentity is the name of the workload identity for which the cert was issued. - WorkloadIdentity string `json:",omitempty"` - // WorkloadIdentityURI is the cert URI value. - WorkloadIdentityURI string `json:",omitempty"` - // Service is the name of the service for which the cert was issued. Service string `json:",omitempty"` // ServiceURI is the cert URI value. @@ -252,12 +247,6 @@ type IssuedCert struct { RaftIndex } -func (i *IssuedCert) Key() string { - return fmt.Sprintf("%s", - i.SerialNumber, - ) -} - // CAOp is the operation for a request related to intentions. type CAOp string diff --git a/agent/structs/connect_ca_test.go b/agent/structs/connect_ca_test.go index 5f224e04f76a6..d64efef375bfd 100644 --- a/agent/structs/connect_ca_test.go +++ b/agent/structs/connect_ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/connect_ce.go b/agent/structs/connect_ce.go index 98d930939b3e3..9547c245148b0 100644 --- a/agent/structs/connect_ce.go +++ b/agent/structs/connect_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/connect_proxy_config.go b/agent/structs/connect_proxy_config.go index d84953e1b0e7e..acca2ad1cbcd4 100644 --- a/agent/structs/connect_proxy_config.go +++ b/agent/structs/connect_proxy_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" ) const ( @@ -181,39 +180,6 @@ type AccessLogsConfig struct { TextFormat string `json:",omitempty" alias:"text_format"` } -func (c *AccessLogsConfig) GetEnabled() bool { - return c.Enabled -} - -func (c *AccessLogsConfig) GetDisableListenerLogs() bool { - return c.DisableListenerLogs -} - -func (c *AccessLogsConfig) GetType() pbmesh.LogSinkType { - switch c.Type { - case FileLogSinkType: - return pbmesh.LogSinkType_LOG_SINK_TYPE_FILE - case StdErrLogSinkType: - return pbmesh.LogSinkType_LOG_SINK_TYPE_STDERR - case StdOutLogSinkType: - return pbmesh.LogSinkType_LOG_SINK_TYPE_STDOUT - } - - return pbmesh.LogSinkType_LOG_SINK_TYPE_DEFAULT -} - -func (c *AccessLogsConfig) GetPath() string { - return c.Path -} - -func (c *AccessLogsConfig) GetJsonFormat() string { - return c.JSONFormat -} - -func (c *AccessLogsConfig) GetTextFormat() string { - return c.TextFormat -} - func (c *AccessLogsConfig) IsZero() bool { if c == nil { return true @@ -839,12 +805,3 @@ func (e *ExposeConfig) Finalize() { } } } - -type AccessLogs interface { - GetEnabled() bool - GetDisableListenerLogs() bool - GetType() pbmesh.LogSinkType - GetPath() string - GetJsonFormat() string - GetTextFormat() string -} diff --git a/agent/structs/connect_proxy_config_ce.go b/agent/structs/connect_proxy_config_ce.go index 7b6bee851da9b..898ca163a5c2f 100644 --- a/agent/structs/connect_proxy_config_ce.go +++ b/agent/structs/connect_proxy_config_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/connect_proxy_config_test.go b/agent/structs/connect_proxy_config_test.go index be16d17be6a3c..bcf43b2119ca1 100644 --- a/agent/structs/connect_proxy_config_test.go +++ b/agent/structs/connect_proxy_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/deep-copy.sh b/agent/structs/deep-copy.sh index cbc1bdc42ae5d..e4ab69273a473 100755 --- a/agent/structs/deep-copy.sh +++ b/agent/structs/deep-copy.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 +# SPDX-License-Identifier: MPL-2.0 readonly PACKAGE_DIR="$(dirname "${BASH_SOURCE[0]}")" diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 4f2f7f41c46d6..b112f6a2b1c13 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/discovery_chain_ce.go b/agent/structs/discovery_chain_ce.go index b769bf8c2e084..febff8c7762a7 100644 --- a/agent/structs/discovery_chain_ce.go +++ b/agent/structs/discovery_chain_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/envoy_extension.go b/agent/structs/envoy_extension.go index ab9988bf21bc8..c788aedf37e85 100644 --- a/agent/structs/envoy_extension.go +++ b/agent/structs/envoy_extension.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/errors.go b/agent/structs/errors.go index 31a818bd62f23..82e2b0b5f0132 100644 --- a/agent/structs/errors.go +++ b/agent/structs/errors.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -19,11 +19,6 @@ const ( errServiceNotFound = "Service not found: " errQueryNotFound = "Query not found" errLeaderNotTracked = "Raft leader not found in server lookup mapping" - errConnectNotEnabled = "Connect must be enabled in order to use this endpoint" - errRateLimited = "Rate limit reached, try again later" // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). - errNotPrimaryDatacenter = "not the primary datacenter" - errStateReadOnly = "CA Provider State is read-only" - errUsingV2CatalogExperiment = "V1 catalog is disabled when V2 is enabled" ) var ( @@ -36,11 +31,6 @@ var ( ErrDCNotAvailable = errors.New(errDCNotAvailable) ErrQueryNotFound = errors.New(errQueryNotFound) ErrLeaderNotTracked = errors.New(errLeaderNotTracked) - ErrConnectNotEnabled = errors.New(errConnectNotEnabled) - ErrRateLimited = errors.New(errRateLimited) // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). - ErrNotPrimaryDatacenter = errors.New(errNotPrimaryDatacenter) - ErrStateReadOnly = errors.New(errStateReadOnly) - ErrUsingV2CatalogExperiment = errors.New(errUsingV2CatalogExperiment) ) func IsErrNoDCPath(err error) bool { @@ -62,7 +52,3 @@ func IsErrRPCRateExceeded(err error) bool { func IsErrServiceNotFound(err error) bool { return err != nil && strings.Contains(err.Error(), errServiceNotFound) } - -func IsErrUsingV2CatalogExperiment(err error) bool { - return err != nil && strings.Contains(err.Error(), errUsingV2CatalogExperiment) -} diff --git a/agent/structs/federation_state.go b/agent/structs/federation_state.go index 5a0e8eef6d04d..f123a0954be7e 100644 --- a/agent/structs/federation_state.go +++ b/agent/structs/federation_state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/identity.go b/agent/structs/identity.go index b55f6bb7ae51d..286dd552a3469 100644 --- a/agent/structs/identity.go +++ b/agent/structs/identity.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 95e9d8388aae8..21d5432b41123 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -30,9 +30,6 @@ const ( // fix up all the places where this was used with the proper namespace // value. 
IntentionDefaultNamespace = "default" - - IntentionDefaultPolicyAllow = "allow" - IntentionDefaultPolicyDeny = "deny" ) // Intention defines an intention for the Connect Service Graph. This defines @@ -730,7 +727,7 @@ type IntentionQueryCheckResponse struct { // - Whether the matching intention has L7 permissions attached // - Whether the intention is managed by an external source like k8s // - Whether there is an exact, or wildcard, intention referencing the two services -// - Whether intentions are in DefaultAllow mode +// - Whether ACLs are in DefaultAllow mode type IntentionDecisionSummary struct { Allowed bool HasPermissions bool diff --git a/agent/structs/intention_ce.go b/agent/structs/intention_ce.go index d417a63f5964e..af62df50bca44 100644 --- a/agent/structs/intention_ce.go +++ b/agent/structs/intention_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index 077bef0f200d2..600f600729d13 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/operator.go b/agent/structs/operator.go index 9d78cb92143f8..05862861e3f85 100644 --- a/agent/structs/operator.go +++ b/agent/structs/operator.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/peering.go b/agent/structs/peering.go index 927efabb34c25..3ee7acb224025 100644 --- a/agent/structs/peering.go +++ b/agent/structs/peering.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index 1c851c476ef00..71b0eba81bc3e 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -341,7 +341,7 @@ type PreparedQueryExecuteRemoteRequest struct { Connect bool // QueryOptions (unfortunately named here) controls the consistency - // settings for the service lookups. + // settings for the the service lookups. QueryOptions } diff --git a/agent/structs/prepared_query_test.go b/agent/structs/prepared_query_test.go index 537c6b043818a..a6d6e64849af0 100644 --- a/agent/structs/prepared_query_test.go +++ b/agent/structs/prepared_query_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/protobuf_compat.go b/agent/structs/protobuf_compat.go index c52d85a32c29f..65ebf2eaf4acd 100644 --- a/agent/structs/protobuf_compat.go +++ b/agent/structs/protobuf_compat.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 9b9fec89e9540..6ee81af0590c9 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/service_definition_test.go b/agent/structs/service_definition_test.go index ec6d4fc5374e9..023972092ff44 100644 --- a/agent/structs/service_definition_test.go +++ b/agent/structs/service_definition_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/snapshot.go b/agent/structs/snapshot.go index 3d71fb093d3c6..4f622216751f0 100644 --- a/agent/structs/snapshot.go +++ b/agent/structs/snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/structs.deepcopy.go b/agent/structs/structs.deepcopy.go index 98b118eb21842..5ddf86352f06b 100644 --- a/agent/structs/structs.deepcopy.go +++ b/agent/structs/structs.deepcopy.go @@ -18,20 +18,6 @@ func (o *APIGatewayListener) DeepCopy() *APIGatewayListener { cp.TLS.CipherSuites = make([]types.TLSCipherSuite, len(o.TLS.CipherSuites)) copy(cp.TLS.CipherSuites, o.TLS.CipherSuites) } - if o.Override != nil { - cp.Override = new(APIGatewayPolicy) - *cp.Override = *o.Override - if o.Override.JWT != nil { - cp.Override.JWT = o.Override.JWT.DeepCopy() - } - } - if o.Default != nil { - cp.Default = new(APIGatewayPolicy) - *cp.Default = *o.Default - if o.Default.JWT != nil { - cp.Default.JWT = o.Default.JWT.DeepCopy() - } - } return &cp } @@ -397,47 +383,6 @@ func (o *HTTPRouteConfigEntry) DeepCopy() *HTTPRouteConfigEntry { cp.Rules[i2].Filters.URLRewrite = new(URLRewrite) *cp.Rules[i2].Filters.URLRewrite = *o.Rules[i2].Filters.URLRewrite } - if o.Rules[i2].Filters.RetryFilter != nil { - cp.Rules[i2].Filters.RetryFilter = new(RetryFilter) - *cp.Rules[i2].Filters.RetryFilter = *o.Rules[i2].Filters.RetryFilter - if o.Rules[i2].Filters.RetryFilter.RetryOn != nil { - cp.Rules[i2].Filters.RetryFilter.RetryOn = make([]string, len(o.Rules[i2].Filters.RetryFilter.RetryOn)) - copy(cp.Rules[i2].Filters.RetryFilter.RetryOn, o.Rules[i2].Filters.RetryFilter.RetryOn) - } - if o.Rules[i2].Filters.RetryFilter.RetryOnStatusCodes != nil { - cp.Rules[i2].Filters.RetryFilter.RetryOnStatusCodes = make([]uint32, len(o.Rules[i2].Filters.RetryFilter.RetryOnStatusCodes)) - copy(cp.Rules[i2].Filters.RetryFilter.RetryOnStatusCodes, o.Rules[i2].Filters.RetryFilter.RetryOnStatusCodes) - } - } - if o.Rules[i2].Filters.TimeoutFilter != nil { - cp.Rules[i2].Filters.TimeoutFilter = new(TimeoutFilter) - *cp.Rules[i2].Filters.TimeoutFilter = *o.Rules[i2].Filters.TimeoutFilter - } - if o.Rules[i2].Filters.JWT != nil { - cp.Rules[i2].Filters.JWT = o.Rules[i2].Filters.JWT.DeepCopy() - } - if o.Rules[i2].ResponseFilters.Headers != nil { - cp.Rules[i2].ResponseFilters.Headers = make([]HTTPHeaderFilter, len(o.Rules[i2].ResponseFilters.Headers)) - copy(cp.Rules[i2].ResponseFilters.Headers, o.Rules[i2].ResponseFilters.Headers) - for i5 := range o.Rules[i2].ResponseFilters.Headers { - if o.Rules[i2].ResponseFilters.Headers[i5].Add != nil { - cp.Rules[i2].ResponseFilters.Headers[i5].Add = make(map[string]string, len(o.Rules[i2].ResponseFilters.Headers[i5].Add)) - for k7, v7 := range o.Rules[i2].ResponseFilters.Headers[i5].Add { - cp.Rules[i2].ResponseFilters.Headers[i5].Add[k7] = v7 - } - } - if o.Rules[i2].ResponseFilters.Headers[i5].Remove != nil { - cp.Rules[i2].ResponseFilters.Headers[i5].Remove = make([]string, 
len(o.Rules[i2].ResponseFilters.Headers[i5].Remove)) - copy(cp.Rules[i2].ResponseFilters.Headers[i5].Remove, o.Rules[i2].ResponseFilters.Headers[i5].Remove) - } - if o.Rules[i2].ResponseFilters.Headers[i5].Set != nil { - cp.Rules[i2].ResponseFilters.Headers[i5].Set = make(map[string]string, len(o.Rules[i2].ResponseFilters.Headers[i5].Set)) - for k7, v7 := range o.Rules[i2].ResponseFilters.Headers[i5].Set { - cp.Rules[i2].ResponseFilters.Headers[i5].Set[k7] = v7 - } - } - } - } if o.Rules[i2].Matches != nil { cp.Rules[i2].Matches = make([]HTTPMatch, len(o.Rules[i2].Matches)) copy(cp.Rules[i2].Matches, o.Rules[i2].Matches) @@ -482,47 +427,6 @@ func (o *HTTPRouteConfigEntry) DeepCopy() *HTTPRouteConfigEntry { cp.Rules[i2].Services[i4].Filters.URLRewrite = new(URLRewrite) *cp.Rules[i2].Services[i4].Filters.URLRewrite = *o.Rules[i2].Services[i4].Filters.URLRewrite } - if o.Rules[i2].Services[i4].Filters.RetryFilter != nil { - cp.Rules[i2].Services[i4].Filters.RetryFilter = new(RetryFilter) - *cp.Rules[i2].Services[i4].Filters.RetryFilter = *o.Rules[i2].Services[i4].Filters.RetryFilter - if o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOn != nil { - cp.Rules[i2].Services[i4].Filters.RetryFilter.RetryOn = make([]string, len(o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOn)) - copy(cp.Rules[i2].Services[i4].Filters.RetryFilter.RetryOn, o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOn) - } - if o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOnStatusCodes != nil { - cp.Rules[i2].Services[i4].Filters.RetryFilter.RetryOnStatusCodes = make([]uint32, len(o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOnStatusCodes)) - copy(cp.Rules[i2].Services[i4].Filters.RetryFilter.RetryOnStatusCodes, o.Rules[i2].Services[i4].Filters.RetryFilter.RetryOnStatusCodes) - } - } - if o.Rules[i2].Services[i4].Filters.TimeoutFilter != nil { - cp.Rules[i2].Services[i4].Filters.TimeoutFilter = new(TimeoutFilter) - *cp.Rules[i2].Services[i4].Filters.TimeoutFilter = *o.Rules[i2].Services[i4].Filters.TimeoutFilter - } - if o.Rules[i2].Services[i4].Filters.JWT != nil { - cp.Rules[i2].Services[i4].Filters.JWT = o.Rules[i2].Services[i4].Filters.JWT.DeepCopy() - } - if o.Rules[i2].Services[i4].ResponseFilters.Headers != nil { - cp.Rules[i2].Services[i4].ResponseFilters.Headers = make([]HTTPHeaderFilter, len(o.Rules[i2].Services[i4].ResponseFilters.Headers)) - copy(cp.Rules[i2].Services[i4].ResponseFilters.Headers, o.Rules[i2].Services[i4].ResponseFilters.Headers) - for i7 := range o.Rules[i2].Services[i4].ResponseFilters.Headers { - if o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Add != nil { - cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Add = make(map[string]string, len(o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Add)) - for k9, v9 := range o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Add { - cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Add[k9] = v9 - } - } - if o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Remove != nil { - cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Remove = make([]string, len(o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Remove)) - copy(cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Remove, o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Remove) - } - if o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Set != nil { - cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Set = make(map[string]string, len(o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Set)) - for k9, v9 := range 
o.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Set { - cp.Rules[i2].Services[i4].ResponseFilters.Headers[i7].Set[k9] = v9 - } - } - } - } } } } @@ -925,14 +829,6 @@ func (o *ServiceConfigEntry) DeepCopy() *ServiceConfigEntry { copy(cp.Destination.Addresses, o.Destination.Addresses) } } - if o.RateLimits != nil { - cp.RateLimits = new(RateLimits) - *cp.RateLimits = *o.RateLimits - if o.RateLimits.InstanceLevel.Routes != nil { - cp.RateLimits.InstanceLevel.Routes = make([]InstanceLevelRouteRateLimits, len(o.RateLimits.InstanceLevel.Routes)) - copy(cp.RateLimits.InstanceLevel.Routes, o.RateLimits.InstanceLevel.Routes) - } - } if o.EnvoyExtensions != nil { cp.EnvoyExtensions = make([]EnvoyExtension, len(o.EnvoyExtensions)) copy(cp.EnvoyExtensions, o.EnvoyExtensions) @@ -983,10 +879,6 @@ func (o *ServiceConfigResponse) DeepCopy() *ServiceConfigResponse { cp.Destination.Addresses = make([]string, len(o.Destination.Addresses)) copy(cp.Destination.Addresses, o.Destination.Addresses) } - if o.RateLimits.InstanceLevel.Routes != nil { - cp.RateLimits.InstanceLevel.Routes = make([]InstanceLevelRouteRateLimits, len(o.RateLimits.InstanceLevel.Routes)) - copy(cp.RateLimits.InstanceLevel.Routes, o.RateLimits.InstanceLevel.Routes) - } if o.Meta != nil { cp.Meta = make(map[string]string, len(o.Meta)) for k2, v2 := range o.Meta { diff --git a/agent/structs/structs.deepcopy_ce.go b/agent/structs/structs.deepcopy_ce.go deleted file mode 100644 index 71f20d7632d55..0000000000000 --- a/agent/structs/structs.deepcopy_ce.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !consulent - -package structs - -// DeepCopy generates a deep copy of *APIGatewayJWTRequirement -func (o *APIGatewayJWTRequirement) DeepCopy() *APIGatewayJWTRequirement { - return new(APIGatewayJWTRequirement) -} - -// DeepCopy generates a deep copy of *JWTFilter -func (o *JWTFilter) DeepCopy() *JWTFilter { - return new(JWTFilter) -} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index d9fbac2bfe4cc..34e71c5d9d264 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "math/rand" - "os" "reflect" "regexp" "sort" @@ -228,9 +227,6 @@ const ( var allowedConsulMetaKeysForMeshGateway = map[string]struct{}{MetaWANFederationKey: {}} -// CEDowngrade indicates if we are in downgrading from ent to ce -var CEDowngrade = os.Getenv("CONSUL_ENTERPRISE_DOWNGRADE_TO_CE") == "true" - var ( NodeMaintCheckID = NewCheckID(NodeMaint, nil) ) @@ -1489,10 +1485,6 @@ func (s *NodeService) IsGateway() bool { func (s *NodeService) Validate() error { var result error - if err := s.Locality.Validate(); err != nil { - result = multierror.Append(result, err) - } - if s.Kind == ServiceKindConnectProxy { if s.Port == 0 && s.SocketPath == "" { result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind)) @@ -2106,18 +2098,6 @@ func (csn *CheckServiceNode) CanRead(authz acl.Authorizer) acl.EnforcementDecisi return acl.Allow } -func (csn *CheckServiceNode) Locality() *Locality { - if csn.Service != nil && csn.Service.Locality != nil { - return csn.Service.Locality - } - - if csn.Node != nil && csn.Node.Locality != nil { - return csn.Node.Locality - } - - return nil -} - type CheckServiceNodes []CheckServiceNode func (csns CheckServiceNodes) DeepCopy() CheckServiceNodes { @@ -3139,15 +3119,3 @@ func (l *Locality) GetRegion() string { } return l.Region } - -func (l *Locality) Validate() error { - if l == nil { - return nil - } - - if l.Region == "" && l.Zone != "" { - return fmt.Errorf("zone cannot be set without region") - } - - return nil -} diff --git a/agent/structs/structs_ce.go b/agent/structs/structs_ce.go index 2fa039032c23b..6b004460e5fb7 100644 --- a/agent/structs/structs_ce.go +++ b/agent/structs/structs_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs @@ -173,13 +174,3 @@ func (s *ServiceNode) NodeIdentity() Identity { } type EnterpriseServiceUsage struct{} - -// WithNormalizedUpstreams returns a deep copy of the NodeService with no modifications to -// data for CE versions. -func (ns *NodeService) WithNormalizedUpstreams() *NodeService { - // Simply return a copy for CE, since it doesn't have partitions or namespaces. - if ns == nil { - return nil - } - return ns.DeepCopy() -} diff --git a/agent/structs/structs_ce_test.go b/agent/structs/structs_ce_test.go index 67e3825f24f9c..55e51a70088c3 100644 --- a/agent/structs/structs_ce_test.go +++ b/agent/structs/structs_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package structs diff --git a/agent/structs/structs_ext_test.go b/agent/structs/structs_ext_test.go index 3b1cd6b6df1cf..9bbc4ca0cb915 100644 --- a/agent/structs/structs_ext_test.go +++ b/agent/structs/structs_ext_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs_test diff --git a/agent/structs/structs_filtering_test.go b/agent/structs/structs_filtering_test.go index 3750adb16ce1f..9739923e0e5a9 100644 --- a/agent/structs/structs_filtering_test.go +++ b/agent/structs/structs_filtering_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index bf909aa41903e..6d887da9ac776 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs @@ -592,43 +592,6 @@ func TestStructs_ServiceNode_Conversions(t *testing.T) { } } -func TestStructs_Locality_Validate(t *testing.T) { - type testCase struct { - locality *Locality - err string - } - cases := map[string]testCase{ - "nil": { - nil, - "", - }, - "region only": { - &Locality{Region: "us-west-1"}, - "", - }, - "region and zone": { - &Locality{Region: "us-west-1", Zone: "us-west-1a"}, - "", - }, - "zone only": { - &Locality{Zone: "us-west-1a"}, - "zone cannot be set without region", - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - err := tc.locality.Validate() - if tc.err == "" { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tc.err) - } - }) - } -} - func TestStructs_NodeService_ValidateMeshGateway(t *testing.T) { type testCase struct { Modify func(*NodeService) @@ -1189,13 +1152,6 @@ func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) { }, "", }, - { - "connect-proxy: invalid locality", - func(x *NodeService) { - x.Locality = &Locality{Zone: "bad"} - }, - "zone cannot be set without region", - }, } for _, tc := range cases { @@ -1358,7 +1314,7 @@ func TestStructs_NodeService_ValidateSidecarService(t *testing.T) { } func TestStructs_NodeService_ConnectNativeEmptyPortError(t *testing.T) { - ns := TestNodeService() + ns := TestNodeService(t) ns.Connect.Native = true ns.Port = 0 err := ns.Validate() diff --git a/agent/structs/system_metadata.go b/agent/structs/system_metadata.go index 555756201a5d2..fa6f6cd0ae58c 100644 --- a/agent/structs/system_metadata.go +++ b/agent/structs/system_metadata.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/testing.go b/agent/structs/testing.go index 527da21c4e890..ad8382b0ce3b1 100644 --- a/agent/structs/testing.go +++ b/agent/structs/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index 8047695bba564..9e72aebc77458 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -1,14 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs import ( "fmt" - "github.com/mitchellh/go-testing-interface" - "github.com/hashicorp/consul/acl" + "github.com/mitchellh/go-testing-interface" ) // TestRegisterRequest returns a RegisterRequest for registering a typical service. @@ -48,11 +47,11 @@ func TestRegisterIngressGateway(t testing.T) *RegisterRequest { } // TestNodeService returns a *NodeService representing a valid regular service: "web". 
-func TestNodeService() *NodeService { - return TestNodeServiceWithName("web") +func TestNodeService(t testing.T) *NodeService { + return TestNodeServiceWithName(t, "web") } -func TestNodeServiceWithName(name string) *NodeService { +func TestNodeServiceWithName(t testing.T, name string) *NodeService { return &NodeService{ Kind: ServiceKindTypical, Service: name, diff --git a/agent/structs/testing_connect_proxy_config.go b/agent/structs/testing_connect_proxy_config.go index 971e355fba2da..c41cc99651113 100644 --- a/agent/structs/testing_connect_proxy_config.go +++ b/agent/structs/testing_connect_proxy_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/testing_intention.go b/agent/structs/testing_intention.go index 974a103de1bd4..57c6f9d50154e 100644 --- a/agent/structs/testing_intention.go +++ b/agent/structs/testing_intention.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/testing_service_definition.go b/agent/structs/testing_service_definition.go index da51014f43115..2707067262f5e 100644 --- a/agent/structs/testing_service_definition.go +++ b/agent/structs/testing_service_definition.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/structs/txn.go b/agent/structs/txn.go index efb11aa891eae..a97b4733f5edb 100644 --- a/agent/structs/txn.go +++ b/agent/structs/txn.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package structs diff --git a/agent/submatview/handler.go b/agent/submatview/handler.go index f33746a7f324c..b3c900e695286 100644 --- a/agent/submatview/handler.go +++ b/agent/submatview/handler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/local_materializer.go b/agent/submatview/local_materializer.go index 4ee06a3640298..5eeaefaa665a9 100644 --- a/agent/submatview/local_materializer.go +++ b/agent/submatview/local_materializer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/local_materializer_test.go b/agent/submatview/local_materializer_test.go index fa7a01845034b..e6600a0516207 100644 --- a/agent/submatview/local_materializer_test.go +++ b/agent/submatview/local_materializer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/materializer.go b/agent/submatview/materializer.go index 42754c914da08..240f6cfafbb4e 100644 --- a/agent/submatview/materializer.go +++ b/agent/submatview/materializer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/rpc_materializer.go b/agent/submatview/rpc_materializer.go index dfeb90172a695..855576b1b339a 100644 --- a/agent/submatview/rpc_materializer.go +++ b/agent/submatview/rpc_materializer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/store.go b/agent/submatview/store.go index ccfc319e2f4cc..1f189121264c4 100644 --- a/agent/submatview/store.go +++ b/agent/submatview/store.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index 45ba11d3cc3cf..3b6d9fbc2b57d 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview_test diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index c878654d89be9..36a92e7ec02b7 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/submatview/streaming_test.go b/agent/submatview/streaming_test.go index 54b28d1abe6bc..223babf530b13 100644 --- a/agent/submatview/streaming_test.go +++ b/agent/submatview/streaming_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package submatview diff --git a/agent/systemd/notify.go b/agent/systemd/notify.go index 60c71947dc8a4..88ce2c2ece95a 100644 --- a/agent/systemd/notify.go +++ b/agent/systemd/notify.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package systemd diff --git a/agent/testagent.go b/agent/testagent.go index a18dee1eada53..4e80e309e90e9 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -18,9 +18,9 @@ import ( "text/template" "time" - "github.com/armon/go-metrics" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" + uuid "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" @@ -76,7 +76,7 @@ type TestAgent struct { // dns is a reference to the first started DNS endpoint. // It is valid after Start(). - dns dnsServer + dns *DNSServer // srv is an HTTPHandlers that may be used to test http endpoints. srv *HTTPHandlers @@ -136,8 +136,8 @@ func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string) func StartTestAgent(t *testing.T, a TestAgent) *TestAgent { t.Helper() retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) { - r.Helper() - if err := a.Start(r); err != nil { + t.Helper() + if err := a.Start(t); err != nil { r.Fatal(err) } }) @@ -171,7 +171,7 @@ func TestConfigHCL(nodeID string) string { // Start starts a test agent. It returns an error if the agent could not be started. // If no error is returned, the caller must call Shutdown() when finished. 
-func (a *TestAgent) Start(t testutil.TestingTB) error { +func (a *TestAgent) Start(t *testing.T) error { t.Helper() if a.Agent != nil { return fmt.Errorf("TestAgent already started") @@ -314,23 +314,6 @@ func (a *TestAgent) waitForUp() error { continue // fail, try again } } - - if a.baseDeps.UseV2Resources() { - args := structs.DCSpecificRequest{ - Datacenter: "dc1", - } - var leader string - if err := a.RPC(context.Background(), "Status.Leader", args, &leader); err != nil { - retErr = fmt.Errorf("Status.Leader failed: %v", err) - continue // fail, try again - } - if leader == "" { - retErr = fmt.Errorf("No leader") - continue // fail, try again - } - return nil // success - } - // Ensure we have a leader and a node registration. args := &structs.DCSpecificRequest{ Datacenter: a.Config.Datacenter, @@ -436,7 +419,7 @@ func (a *TestAgent) DNSAddr() string { if a.dns == nil { return "" } - return a.dns.GetAddr() + return a.dns.Addr } func (a *TestAgent) HTTPAddr() string { @@ -495,19 +478,6 @@ func (a *TestAgent) consulConfig() *consul.Config { return c } -// Using sdk/freeport with *retry.R is not possible without changing -// function signatures. We use this shim instead to save the headache -// of syncing sdk submodule updates. -type retryShim struct { - *retry.R - - name string -} - -func (r *retryShim) Name() string { - return r.name -} - // pickRandomPorts selects random ports from fixed size random blocks of // ports. This does not eliminate the chance for port conflict but // reduces it significantly with little overhead. Furthermore, asking @@ -516,16 +486,13 @@ func (r *retryShim) Name() string { // chance of port conflicts for concurrently executed test binaries. // Instead of relying on one set of ports to be sufficient we retry // starting the agent with different ports on port conflict. 
-func randomPortsSource(t testutil.TestingTB, useHTTPS bool) string { - var ports []int - retry.RunWith(retry.TwoSeconds(), t, func(r *retry.R) { - ports = freeport.GetN(r, 7) - }) +func randomPortsSource(t *testing.T, useHTTPS bool) string { + ports := freeport.GetN(t, 8) var http, https int if useHTTPS { http = -1 - https = ports[1] + https = ports[2] } else { http = ports[1] https = -1 @@ -536,11 +503,11 @@ func randomPortsSource(t testutil.TestingTB, useHTTPS bool) string { dns = ` + strconv.Itoa(ports[0]) + ` http = ` + strconv.Itoa(http) + ` https = ` + strconv.Itoa(https) + ` - serf_lan = ` + strconv.Itoa(ports[2]) + ` - serf_wan = ` + strconv.Itoa(ports[3]) + ` - server = ` + strconv.Itoa(ports[4]) + ` - grpc = ` + strconv.Itoa(ports[5]) + ` - grpc_tls = ` + strconv.Itoa(ports[6]) + ` + serf_lan = ` + strconv.Itoa(ports[3]) + ` + serf_wan = ` + strconv.Itoa(ports[4]) + ` + server = ` + strconv.Itoa(ports[5]) + ` + grpc = ` + strconv.Itoa(ports[6]) + ` + grpc_tls = ` + strconv.Itoa(ports[7]) + ` } ` } @@ -635,7 +602,6 @@ type TestACLConfigParams struct { DefaultToken string AgentRecoveryToken string ReplicationToken string - DNSToken string EnableTokenReplication bool } @@ -654,8 +620,7 @@ func (p *TestACLConfigParams) HasConfiguredTokens() bool { p.AgentToken != "" || p.DefaultToken != "" || p.AgentRecoveryToken != "" || - p.ReplicationToken != "" || - p.DNSToken != "" + p.ReplicationToken != "" } func TestACLConfigNew() string { @@ -665,7 +630,6 @@ func TestACLConfigNew() string { InitialManagementToken: "root", AgentToken: "root", AgentRecoveryToken: "towel", - DNSToken: "dns", }) } diff --git a/agent/testagent_test.go b/agent/testagent_test.go index 266b6e864cc36..66d1d61f01e53 100644 --- a/agent/testagent_test.go +++ b/agent/testagent_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/token/persistence.go b/agent/token/persistence.go index 1a3898e12e8d4..9d543b30edf9d 100644 --- a/agent/token/persistence.go +++ b/agent/token/persistence.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package token @@ -26,7 +26,6 @@ type Config struct { ACLAgentRecoveryToken string ACLReplicationToken string ACLConfigFileRegistrationToken string - ACLDNSToken string EnterpriseConfig } @@ -78,7 +77,6 @@ type persistedTokens struct { Default string `json:"default,omitempty"` Agent string `json:"agent,omitempty"` ConfigFileRegistration string `json:"config_file_service_registration,omitempty"` - DNS string `json:"dns,omitempty"` } type fileStore struct { @@ -146,16 +144,6 @@ func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { s.UpdateConfigFileRegistrationToken(cfg.ACLConfigFileRegistrationToken, TokenSourceConfig) } - if tokens.DNS != "" { - s.UpdateDNSToken(tokens.DNS, TokenSourceAPI) - - if cfg.ACLDNSToken != "" { - logger.Warn("\"dns\" token present in both the configuration and persisted token store, using the persisted token") - } - } else { - s.UpdateDNSToken(cfg.ACLDNSToken, TokenSourceConfig) - } - loadEnterpriseTokens(s, cfg) } @@ -218,10 +206,6 @@ func (p *fileStore) saveToFile(s *Store) error { tokens.ConfigFileRegistration = tok } - if tok, source := s.DNSTokenAndSource(); tok != "" && source == TokenSourceAPI { - tokens.DNS = tok - } - data, err := json.Marshal(tokens) if err != nil { p.logger.Warn("failed to persist tokens", "error", err) diff --git a/agent/token/persistence_test.go b/agent/token/persistence_test.go index 4351efd1358e3..093515f70e434 100644 --- a/agent/token/persistence_test.go +++ b/agent/token/persistence_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package token @@ -8,10 +8,9 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/sdk/testutil" ) func TestStore_Load(t *testing.T) { @@ -28,7 +27,6 @@ func TestStore_Load(t *testing.T) { ACLDefaultToken: "charlie", ACLReplicationToken: "delta", ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", } require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "alfa", store.AgentToken()) @@ -36,69 +34,62 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "charlie", store.UserToken()) require.Equal(t, "delta", store.ReplicationToken()) require.Equal(t, "echo", store.ConfigFileRegistrationToken()) - require.Equal(t, "foxtrot", store.DNSToken()) }) t.Run("updated from Config", func(t *testing.T) { cfg := Config{ DataDir: dataDir, - ACLDefaultToken: "sierra", - ACLAgentToken: "tango", - ACLAgentRecoveryToken: "uniform", - ACLReplicationToken: "victor", - ACLConfigFileRegistrationToken: "xray", - ACLDNSToken: "zulu", + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", + ACLConfigFileRegistrationToken: "india", } // ensures no error for missing persisted tokens file require.NoError(t, store.Load(cfg, logger)) - require.Equal(t, "sierra", store.UserToken()) - require.Equal(t, "tango", store.AgentToken()) - require.Equal(t, "uniform", store.AgentRecoveryToken()) - require.Equal(t, "victor", store.ReplicationToken()) - require.Equal(t, "xray", store.ConfigFileRegistrationToken()) - require.Equal(t, "zulu", store.DNSToken()) + require.Equal(t, "echo", store.UserToken()) + require.Equal(t, "foxtrot", store.AgentToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) + require.Equal(t, "hotel", 
store.ReplicationToken()) + require.Equal(t, "india", store.ConfigFileRegistrationToken()) }) t.Run("with persisted tokens", func(t *testing.T) { cfg := Config{ DataDir: dataDir, - ACLDefaultToken: "alpha", - ACLAgentToken: "bravo", - ACLAgentRecoveryToken: "charlie", - ACLReplicationToken: "delta", - ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", + ACLConfigFileRegistrationToken: "delta", } tokens := `{ - "agent" : "golf", - "agent_recovery" : "hotel", - "default": "india", - "replication": "juliet", - "config_file_service_registration": "kilo", - "dns": "lima" + "agent" : "india", + "agent_recovery" : "juliett", + "default": "kilo", + "replication": "lima", + "config_file_service_registration": "mike" }` require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) // no updates since token persistence is not enabled - require.Equal(t, "alpha", store.UserToken()) - require.Equal(t, "bravo", store.AgentToken()) - require.Equal(t, "charlie", store.AgentRecoveryToken()) - require.Equal(t, "delta", store.ReplicationToken()) - require.Equal(t, "echo", store.ConfigFileRegistrationToken()) - require.Equal(t, "foxtrot", store.DNSToken()) + require.Equal(t, "echo", store.UserToken()) + require.Equal(t, "foxtrot", store.AgentToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) + require.Equal(t, "hotel", store.ReplicationToken()) + require.Equal(t, "delta", store.ConfigFileRegistrationToken()) cfg.EnablePersistence = true require.NoError(t, store.Load(cfg, logger)) - require.Equal(t, "golf", store.AgentToken()) - require.Equal(t, "hotel", store.AgentRecoveryToken()) - require.Equal(t, "india", store.UserToken()) - require.Equal(t, "juliet", store.ReplicationToken()) - require.Equal(t, "kilo", store.ConfigFileRegistrationToken()) - require.Equal(t, "lima", store.DNSToken()) + require.Equal(t, "india", store.AgentToken()) + require.Equal(t, "juliett", store.AgentRecoveryToken()) + require.Equal(t, "kilo", store.UserToken()) + require.Equal(t, "lima", store.ReplicationToken()) + require.Equal(t, "mike", store.ConfigFileRegistrationToken()) // check store persistence was enabled require.NotNil(t, store.persistence) @@ -124,8 +115,7 @@ func TestStore_Load(t *testing.T) { "agent_recovery" : "november", "default": "oscar", "replication" : "papa", - "config_file_service_registration" : "lima", - "dns": "kilo" + "config_file_service_registration" : "lima" }` cfg := Config{ @@ -136,7 +126,6 @@ func TestStore_Load(t *testing.T) { ACLAgentRecoveryToken: "sierra", ACLReplicationToken: "tango", ACLConfigFileRegistrationToken: "uniform", - ACLDNSToken: "victor", } require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) @@ -147,48 +136,43 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "oscar", store.UserToken()) require.Equal(t, "papa", store.ReplicationToken()) require.Equal(t, "lima", store.ConfigFileRegistrationToken()) - require.Equal(t, "kilo", store.DNSToken()) }) t.Run("with some persisted tokens", func(t *testing.T) { tokens := `{ - "agent" : "xray", - "agent_recovery" : "zulu" + "agent" : "uniform", + "agent_recovery" : "victor" }` cfg := Config{ EnablePersistence: true, DataDir: dataDir, - ACLDefaultToken: "alpha", - ACLAgentToken: "bravo", - ACLAgentRecoveryToken: "charlie", - ACLReplicationToken: "delta", - ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", + 
ACLDefaultToken: "whiskey", + ACLAgentToken: "xray", + ACLAgentRecoveryToken: "yankee", + ACLReplicationToken: "zulu", + ACLConfigFileRegistrationToken: "victor", } require.NoError(t, os.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) - require.Equal(t, "xray", store.AgentToken()) - require.Equal(t, "zulu", store.AgentRecoveryToken()) - - require.Equal(t, "alpha", store.UserToken()) - require.Equal(t, "delta", store.ReplicationToken()) - require.Equal(t, "echo", store.ConfigFileRegistrationToken()) - require.Equal(t, "foxtrot", store.DNSToken()) + require.Equal(t, "uniform", store.AgentToken()) + require.Equal(t, "victor", store.AgentRecoveryToken()) + require.Equal(t, "whiskey", store.UserToken()) + require.Equal(t, "zulu", store.ReplicationToken()) + require.Equal(t, "victor", store.ConfigFileRegistrationToken()) }) t.Run("persisted file contains invalid data", func(t *testing.T) { cfg := Config{ EnablePersistence: true, DataDir: dataDir, - ACLDefaultToken: "alpha", - ACLAgentToken: "bravo", - ACLAgentRecoveryToken: "charlie", - ACLReplicationToken: "delta", - ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", + ACLDefaultToken: "one", + ACLAgentToken: "two", + ACLAgentRecoveryToken: "three", + ACLReplicationToken: "four", + ACLConfigFileRegistrationToken: "five", } require.NoError(t, os.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) @@ -196,12 +180,11 @@ func TestStore_Load(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "failed to decode tokens file") - require.Equal(t, "alpha", store.UserToken()) - require.Equal(t, "bravo", store.AgentToken()) - require.Equal(t, "charlie", store.AgentRecoveryToken()) - require.Equal(t, "delta", store.ReplicationToken()) - require.Equal(t, "echo", store.ConfigFileRegistrationToken()) - require.Equal(t, "foxtrot", store.DNSToken()) + require.Equal(t, "one", store.UserToken()) + require.Equal(t, "two", store.AgentToken()) + require.Equal(t, "three", store.AgentRecoveryToken()) + require.Equal(t, "four", store.ReplicationToken()) + require.Equal(t, "five", store.ConfigFileRegistrationToken()) }) t.Run("persisted file contains invalid json", func(t *testing.T) { @@ -211,9 +194,8 @@ func TestStore_Load(t *testing.T) { ACLDefaultToken: "alfa", ACLAgentToken: "bravo", ACLAgentRecoveryToken: "charlie", - ACLReplicationToken: "delta", - ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", + ACLReplicationToken: "foxtrot", + ACLConfigFileRegistrationToken: "golf", } require.NoError(t, os.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) @@ -224,31 +206,23 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "alfa", store.UserToken()) require.Equal(t, "bravo", store.AgentToken()) require.Equal(t, "charlie", store.AgentRecoveryToken()) - require.Equal(t, "delta", store.ReplicationToken()) - require.Equal(t, "echo", store.ConfigFileRegistrationToken()) - require.Equal(t, "foxtrot", store.DNSToken()) + require.Equal(t, "foxtrot", store.ReplicationToken()) + require.Equal(t, "golf", store.ConfigFileRegistrationToken()) }) } func TestStore_WithPersistenceLock(t *testing.T) { - // ACLDefaultToken: alpha --> sierra - // ACLAgentToken: bravo --> tango - // ACLAgentRecoveryToken: charlie --> uniform - // ACLReplicationToken: delta --> victor - // ACLConfigFileRegistrationToken: echo --> xray - // ACLDNSToken: foxtrot --> zulu setupStore := func() (string, *Store) { dataDir := testutil.TempDir(t, "datadir") store := new(Store) cfg := Config{ 
EnablePersistence: true, DataDir: dataDir, - ACLDefaultToken: "alpha", - ACLAgentToken: "bravo", - ACLAgentRecoveryToken: "charlie", - ACLReplicationToken: "delta", - ACLConfigFileRegistrationToken: "echo", - ACLDNSToken: "foxtrot", + ACLDefaultToken: "default-token", + ACLAgentToken: "agent-token", + ACLAgentRecoveryToken: "recovery-token", + ACLReplicationToken: "replication-token", + ACLConfigFileRegistrationToken: "registration-token", } err := store.Load(cfg, hclog.New(nil)) require.NoError(t, err) @@ -266,39 +240,37 @@ func TestStore_WithPersistenceLock(t *testing.T) { t.Run("persist some tokens", func(t *testing.T) { dataDir, store := setupStore() err := store.WithPersistenceLock(func() error { - require.True(t, store.UpdateUserToken("sierra", TokenSourceAPI)) - require.True(t, store.UpdateAgentRecoveryToken("tango", TokenSourceAPI)) + require.True(t, store.UpdateUserToken("the-new-default-token", TokenSourceAPI)) + require.True(t, store.UpdateAgentRecoveryToken("the-new-recovery-token", TokenSourceAPI)) return nil }) require.NoError(t, err) // Only API-sourced tokens are persisted. requirePersistedTokens(t, dataDir, persistedTokens{ - Default: "sierra", - AgentRecovery: "tango", + Default: "the-new-default-token", + AgentRecovery: "the-new-recovery-token", }) }) t.Run("persist all tokens", func(t *testing.T) { dataDir, store := setupStore() err := store.WithPersistenceLock(func() error { - require.True(t, store.UpdateUserToken("sierra", TokenSourceAPI)) - require.True(t, store.UpdateAgentToken("tango", TokenSourceAPI)) - require.True(t, store.UpdateAgentRecoveryToken("uniform", TokenSourceAPI)) - require.True(t, store.UpdateReplicationToken("victor", TokenSourceAPI)) - require.True(t, store.UpdateConfigFileRegistrationToken("xray", TokenSourceAPI)) - require.True(t, store.UpdateDNSToken("zulu", TokenSourceAPI)) + require.True(t, store.UpdateUserToken("the-new-default-token", TokenSourceAPI)) + require.True(t, store.UpdateAgentToken("the-new-agent-token", TokenSourceAPI)) + require.True(t, store.UpdateAgentRecoveryToken("the-new-recovery-token", TokenSourceAPI)) + require.True(t, store.UpdateReplicationToken("the-new-replication-token", TokenSourceAPI)) + require.True(t, store.UpdateConfigFileRegistrationToken("the-new-registration-token", TokenSourceAPI)) return nil }) require.NoError(t, err) requirePersistedTokens(t, dataDir, persistedTokens{ - Default: "sierra", - Agent: "tango", - AgentRecovery: "uniform", - Replication: "victor", - ConfigFileRegistration: "xray", - DNS: "zulu", + Default: "the-new-default-token", + Agent: "the-new-agent-token", + AgentRecovery: "the-new-recovery-token", + Replication: "the-new-replication-token", + ConfigFileRegistration: "the-new-registration-token", }) }) diff --git a/agent/token/store.go b/agent/token/store.go index 848d539a2a806..b0f9732a8cc6f 100644 --- a/agent/token/store.go +++ b/agent/token/store.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package token @@ -24,7 +24,6 @@ const ( TokenKindUser TokenKindReplication TokenKindConfigFileRegistration - TokenKindDNS ) type watcher struct { @@ -53,7 +52,7 @@ type Store struct { // also be used for agent operations if the agent token isn't set. userToken string - // userTokenSource indicates where this token originated from. 
+ // userTokenSource indicates where this token originated from userTokenSource TokenSource // agentToken is used for internal agent operations like self-registering @@ -61,7 +60,7 @@ type Store struct { // user-initiated operations. agentToken string - // agentTokenSource indicates where this token originated from. + // agentTokenSource indicates where this token originated from agentTokenSource TokenSource // agentRecoveryToken is a special token that's only used locally for @@ -69,30 +68,23 @@ type Store struct { // available. agentRecoveryToken string - // agentRecoveryTokenSource indicates where this token originated from. + // agentRecoveryTokenSource indicates where this token originated from agentRecoveryTokenSource TokenSource // replicationToken is a special token that's used by servers to // replicate data from the primary datacenter. replicationToken string - // replicationTokenSource indicates where this token originated from. + // replicationTokenSource indicates where this token originated from replicationTokenSource TokenSource // configFileRegistrationToken is used to register services and checks // that are defined in configuration files. configFileRegistrationToken string - // configFileRegistrationTokenSource indicates where this token originated from. + // configFileRegistrationTokenSource indicates where this token originated from configFileRegistrationTokenSource TokenSource - // dnsToken is a special token that is used as the implicit token for DNS requests - // as well as for DNS-specific RPC requests. - dnsToken string - - // dnsTokenSource indicates where the dnsToken originated from. - dnsTokenSource TokenSource - watchers map[int]watcher watcherIndex int @@ -212,12 +204,6 @@ func (t *Store) UpdateConfigFileRegistrationToken(token string, source TokenSour &t.configFileRegistrationTokenSource, TokenKindConfigFileRegistration) } -// UpdateDNSToken replaces the current DNS token in the store. -// Returns true if it was changed. -func (t *Store) UpdateDNSToken(token string, source TokenSource) bool { - return t.updateToken(token, source, &t.dnsToken, &t.dnsTokenSource, TokenKindDNS) -} - func (t *Store) updateToken(token string, source TokenSource, dstToken *string, dstSource *TokenSource, kind TokenKind) bool { t.l.Lock() changed := *dstToken != token || *dstSource != source @@ -275,13 +261,6 @@ func (t *Store) ConfigFileRegistrationToken() string { return t.configFileRegistrationToken } -func (t *Store) DNSToken() string { - t.l.RLock() - defer t.l.RUnlock() - - return t.dnsToken -} - // UserToken returns the best token to use for user operations. func (t *Store) UserTokenAndSource() (string, TokenSource) { t.l.RLock() @@ -320,14 +299,6 @@ func (t *Store) ConfigFileRegistrationTokenAndSource() (string, TokenSource) { return t.configFileRegistrationToken, t.configFileRegistrationTokenSource } -// DNSTokenAndSource returns the best token to use for DNS-specific RPC requests and DNS requests -func (t *Store) DNSTokenAndSource() (string, TokenSource) { - t.l.RLock() - defer t.l.RUnlock() - - return t.dnsToken, t.dnsTokenSource -} - // IsAgentRecoveryToken checks to see if a given token is the agent recovery token. // This will never match an empty token for safety. func (t *Store) IsAgentRecoveryToken(token string) bool { diff --git a/agent/token/store_ce.go b/agent/token/store_ce.go index af7732a902088..87fefdbbe0480 100644 --- a/agent/token/store_ce.go +++ b/agent/token/store_ce.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package token diff --git a/agent/token/store_test.go b/agent/token/store_test.go index 2f91614085c4f..8d0992f229ca2 100644 --- a/agent/token/store_test.go +++ b/agent/token/store_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package token @@ -21,8 +21,6 @@ func TestStore_RegularTokens(t *testing.T) { replSource TokenSource registration string registrationSource TokenSource - dns string - dnsSource TokenSource } tests := []struct { @@ -97,23 +95,11 @@ func TestStore_RegularTokens(t *testing.T) { raw: tokens{registration: "G", registrationSource: TokenSourceAPI}, effective: tokens{registration: "G"}, }, - { - name: "set dns - config", - set: tokens{dns: "D", dnsSource: TokenSourceConfig}, - raw: tokens{dns: "D", dnsSource: TokenSourceConfig}, - effective: tokens{dns: "D"}, - }, - { - name: "set dns - api", - set: tokens{dns: "D", dnsSource: TokenSourceAPI}, - raw: tokens{dns: "D", dnsSource: TokenSourceAPI}, - effective: tokens{dns: "D"}, - }, { name: "set all", - set: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G", dns: "D"}, - raw: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G", dns: "D"}, - effective: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G", dns: "D"}, + set: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G"}, + raw: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G"}, + effective: tokens{user: "U", agent: "A", repl: "R", recovery: "M", registration: "G"}, }, } for _, tt := range tests { @@ -139,24 +125,18 @@ func TestStore_RegularTokens(t *testing.T) { require.True(t, s.UpdateConfigFileRegistrationToken(tt.set.registration, tt.set.registrationSource)) } - if tt.set.dns != "" { - require.True(t, s.UpdateDNSToken(tt.set.dns, tt.set.dnsSource)) - } - // If they don't change then they return false. 
require.False(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) require.False(t, s.UpdateAgentToken(tt.set.agent, tt.set.agentSource)) require.False(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) require.False(t, s.UpdateAgentRecoveryToken(tt.set.recovery, tt.set.recoverySource)) require.False(t, s.UpdateConfigFileRegistrationToken(tt.set.registration, tt.set.registrationSource)) - require.False(t, s.UpdateDNSToken(tt.set.dns, tt.set.dnsSource)) require.Equal(t, tt.effective.user, s.UserToken()) require.Equal(t, tt.effective.agent, s.AgentToken()) require.Equal(t, tt.effective.recovery, s.AgentRecoveryToken()) require.Equal(t, tt.effective.repl, s.ReplicationToken()) require.Equal(t, tt.effective.registration, s.ConfigFileRegistrationToken()) - require.Equal(t, tt.effective.dns, s.DNSToken()) tok, src := s.UserTokenAndSource() require.Equal(t, tt.raw.user, tok) @@ -177,10 +157,6 @@ func TestStore_RegularTokens(t *testing.T) { tok, src = s.ConfigFileRegistrationTokenAndSource() require.Equal(t, tt.raw.registration, tok) require.Equal(t, tt.raw.registrationSource, src) - - tok, src = s.DNSTokenAndSource() - require.Equal(t, tt.raw.dns, tok) - require.Equal(t, tt.raw.dnsSource, src) }) } } @@ -235,7 +211,6 @@ func TestStore_Notify(t *testing.T) { replicationNotifier := newNotification(t, s, TokenKindReplication) replicationNotifier2 := newNotification(t, s, TokenKindReplication) registrationNotifier := newNotification(t, s, TokenKindConfigFileRegistration) - dnsNotifier := newNotification(t, s, TokenKindDNS) // perform an update of the user token require.True(t, s.UpdateUserToken("edcae2a2-3b51-4864-b412-c7a568f49cb1", TokenSourceConfig)) @@ -249,7 +224,6 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) // update the agent token which should send a notification to the agent notifier. require.True(t, s.UpdateAgentToken("5d748ec2-d536-461f-8e2a-1f7eae98d559", TokenSourceAPI)) @@ -260,7 +234,6 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) // update the agent recovery token which should send a notification to the agent recovery notifier. require.True(t, s.UpdateAgentRecoveryToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) @@ -271,7 +244,6 @@ func TestStore_Notify(t *testing.T) { requireNotifiedOnce(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) // update the replication token which should send a notification to the replication notifier. 
require.True(t, s.UpdateReplicationToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) @@ -282,7 +254,6 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotifiedOnce(t, replicationNotifier2.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) s.StopNotify(replicationNotifier2) @@ -295,7 +266,6 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) // update the config file registration token which should send a notification to the replication notifier. require.True(t, s.UpdateConfigFileRegistrationToken("82fe7362-7d83-4f43-bb27-c35f1f15083c", TokenSourceAPI)) @@ -306,18 +276,6 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) requireNotifiedOnce(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) - - // update the dns token which should send a notification to the replication notifier. - require.True(t, s.UpdateDNSToken("ce8e829f-dc45-4ba7-9dd3-1dbbe070f573", TokenSourceAPI)) - - requireNotNotified(t, agentNotifier.Ch) - requireNotNotified(t, userNotifier.Ch) - requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentRecoveryNotifier.Ch) - requireNotNotified(t, replicationNotifier2.Ch) - requireNotNotified(t, registrationNotifier.Ch) - requireNotifiedOnce(t, dnsNotifier.Ch) // request updates that are not changes require.False(t, s.UpdateAgentToken("5d748ec2-d536-461f-8e2a-1f7eae98d559", TokenSourceAPI)) @@ -325,7 +283,6 @@ func TestStore_Notify(t *testing.T) { require.False(t, s.UpdateUserToken("47788919-f944-476a-bda5-446d64be1df8", TokenSourceAPI)) require.False(t, s.UpdateReplicationToken("eb0b56b9-fa65-4ae1-902a-c64457c62ac6", TokenSourceAPI)) require.False(t, s.UpdateConfigFileRegistrationToken("82fe7362-7d83-4f43-bb27-c35f1f15083c", TokenSourceAPI)) - require.False(t, s.UpdateDNSToken("ce8e829f-dc45-4ba7-9dd3-1dbbe070f573", TokenSourceAPI)) // ensure that notifications were not sent requireNotNotified(t, agentNotifier.Ch) @@ -333,5 +290,4 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, replicationNotifier.Ch) requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, registrationNotifier.Ch) - requireNotNotified(t, dnsNotifier.Ch) } diff --git a/agent/translate_addr.go b/agent/translate_addr.go index 326117cc98661..9be80ebc0f8c8 100644 --- a/agent/translate_addr.go +++ b/agent/translate_addr.go @@ -1,16 +1,25 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent import ( "fmt" - "github.com/hashicorp/consul/internal/dnsutil" "net" "github.com/hashicorp/consul/agent/structs" ) +type TranslateAddressAccept int + +const ( + TranslateAddressAcceptDomain TranslateAddressAccept = 1 << iota + TranslateAddressAcceptIPv4 + TranslateAddressAcceptIPv6 + + TranslateAddressAcceptAny TranslateAddressAccept = ^0 +) + // TranslateServicePort is used to provide the final, translated port for a service, // depending on how the agent and the other node are configured. The dc // parameter is the dc the datacenter this node is from. 
@@ -26,7 +35,7 @@ func (a *Agent) TranslateServicePort(dc string, port int, taggedAddresses map[st // TranslateServiceAddress is used to provide the final, translated address for a node, // depending on how the agent and the other node are configured. The dc // parameter is the dc the datacenter this node is from. -func (a *Agent) TranslateServiceAddress(dc string, addr string, taggedAddresses map[string]structs.ServiceAddress, accept dnsutil.TranslateAddressAccept) string { +func (a *Agent) TranslateServiceAddress(dc string, addr string, taggedAddresses map[string]structs.ServiceAddress, accept TranslateAddressAccept) string { def := addr v4 := taggedAddresses[structs.TaggedAddressLANIPv4].Address v6 := taggedAddresses[structs.TaggedAddressLANIPv6].Address @@ -50,7 +59,7 @@ func (a *Agent) TranslateServiceAddress(dc string, addr string, taggedAddresses // TranslateAddress is used to provide the final, translated address for a node, // depending on how the agent and the other node are configured. The dc // parameter is the dc the datacenter this node is from. -func (a *Agent) TranslateAddress(dc string, addr string, taggedAddresses map[string]string, accept dnsutil.TranslateAddressAccept) string { +func (a *Agent) TranslateAddress(dc string, addr string, taggedAddresses map[string]string, accept TranslateAddressAccept) string { def := addr v4 := taggedAddresses[structs.TaggedAddressLANIPv4] v6 := taggedAddresses[structs.TaggedAddressLANIPv6] @@ -71,22 +80,22 @@ func (a *Agent) TranslateAddress(dc string, addr string, taggedAddresses map[str return translateAddressAccept(accept, def, v4, v6) } -func translateAddressAccept(accept dnsutil.TranslateAddressAccept, def, v4, v6 string) string { +func translateAddressAccept(accept TranslateAddressAccept, def, v4, v6 string) string { switch { - case accept&dnsutil.TranslateAddressAcceptIPv6 > 0 && v6 != "": + case accept&TranslateAddressAcceptIPv6 > 0 && v6 != "": return v6 - case accept&dnsutil.TranslateAddressAcceptIPv4 > 0 && v4 != "": + case accept&TranslateAddressAcceptIPv4 > 0 && v4 != "": return v4 - case accept&dnsutil.TranslateAddressAcceptAny > 0 && def != "": + case accept&TranslateAddressAcceptAny > 0 && def != "": return def default: defIP := net.ParseIP(def) switch { - case defIP != nil && defIP.To4() != nil && accept&dnsutil.TranslateAddressAcceptIPv4 > 0: + case defIP != nil && defIP.To4() != nil && accept&TranslateAddressAcceptIPv4 > 0: return def - case defIP != nil && defIP.To4() == nil && accept&dnsutil.TranslateAddressAcceptIPv6 > 0: + case defIP != nil && defIP.To4() == nil && accept&TranslateAddressAcceptIPv6 > 0: return def - case defIP == nil && accept&dnsutil.TranslateAddressAcceptDomain > 0: + case defIP == nil && accept&TranslateAddressAcceptDomain > 0: return def } } @@ -97,7 +106,7 @@ func translateAddressAccept(accept dnsutil.TranslateAddressAccept, def, v4, v6 s // TranslateAddresses translates addresses in the given structure into the // final, translated address, depending on how the agent and the other node are // configured. The dc parameter is the datacenter this structure is from. -func (a *Agent) TranslateAddresses(dc string, subj interface{}, accept dnsutil.TranslateAddressAccept) { +func (a *Agent) TranslateAddresses(dc string, subj interface{}, accept TranslateAddressAccept) { // CAUTION - SUBTLE! An agent running on a server can, in some cases, // return pointers directly into the immutable state store for // performance (it's via the in-memory RPC mechanism). 
It's never safe diff --git a/agent/txn_endpoint.go b/agent/txn_endpoint.go index 04dcd7fa39022..7c2f64c1a9317 100644 --- a/agent/txn_endpoint.go +++ b/agent/txn_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/txn_endpoint_test.go b/agent/txn_endpoint_test.go index fe19ed825301c..19c5925ba7a9c 100644 --- a/agent/txn_endpoint_test.go +++ b/agent/txn_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 94c3545647052..6ef69b84035cf 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -190,6 +190,7 @@ func AgentMembersMapAddrVer(s *HTTPHandlers, req *http.Request) (map[string]stri filter := consul.LANMemberFilter{ Partition: entMeta.PartitionOrDefault(), } + if acl.IsDefaultPartition(filter.Partition) { filter.AllSegments = true } diff --git a/agent/ui_endpoint_ce_test.go b/agent/ui_endpoint_ce_test.go index 2cb3f905e28b0..3e57fca667e87 100644 --- a/agent/ui_endpoint_ce_test.go +++ b/agent/ui_endpoint_ce_test.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 //go:build !consulent +// +build !consulent package agent diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index dd1c6d8134b2f..b39a2f31dc931 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package agent @@ -32,28 +32,6 @@ import ( "github.com/hashicorp/consul/types" ) -func TestUIEndpointsFailInV2(t *testing.T) { - t.Parallel() - - a := NewTestAgent(t, `experiments = ["resource-apis"]`) - - checkRequest := func(method, url string) { - t.Run(method+" "+url, func(t *testing.T) { - assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") - }) - } - - checkRequest("GET", "/v1/internal/ui/nodes") - checkRequest("GET", "/v1/internal/ui/node/web") - checkRequest("GET", "/v1/internal/ui/services") - checkRequest("GET", "/v1/internal/ui/exported-services") - checkRequest("GET", "/v1/internal/ui/catalog-overview") - checkRequest("GET", "/v1/internal/ui/gateway-services-nodes/web") - checkRequest("GET", "/v1/internal/ui/gateway-intentions/web") - checkRequest("GET", "/v1/internal/ui/service-topology/web") - checkRequest("PUT", "/v1/internal/service-virtual-ip") -} - func TestUIIndex(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/uiserver/buf_index_fs.go b/agent/uiserver/buf_index_fs.go index 283616c36eda6..9ea23316fb10a 100644 --- a/agent/uiserver/buf_index_fs.go +++ b/agent/uiserver/buf_index_fs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver diff --git a/agent/uiserver/buffered_file.go b/agent/uiserver/buffered_file.go index daa30c610d29f..5c794dac26908 100644 --- a/agent/uiserver/buffered_file.go +++ b/agent/uiserver/buffered_file.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver diff --git a/agent/uiserver/dist/index.html b/agent/uiserver/dist/index.html index 438fc074ed301..95c140c67022d 100644 --- a/agent/uiserver/dist/index.html +++ b/agent/uiserver/dist/index.html @@ -1,7 +1,7 @@ diff --git a/agent/uiserver/redirect_fs.go b/agent/uiserver/redirect_fs.go index 4a61ba7b2c14d..66b48e637fdc8 100644 --- a/agent/uiserver/redirect_fs.go +++ b/agent/uiserver/redirect_fs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver diff --git a/agent/uiserver/ui_template_data.go b/agent/uiserver/ui_template_data.go index 34d3a453b0fd6..d8d5fc42ba4e3 100644 --- a/agent/uiserver/ui_template_data.go +++ b/agent/uiserver/ui_template_data.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver @@ -31,14 +31,6 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{} uiCfg["metrics_provider_options"] = json.RawMessage(cfg.UIConfig.MetricsProviderOptionsJSON) } - v2CatalogEnabled := false - for _, experiment := range cfg.Experiments { - if experiment == "resource-apis" { - v2CatalogEnabled = true - break - } - } - d := map[string]interface{}{ "ContentPath": cfg.UIConfig.ContentPath, "ACLsEnabled": cfg.ACLsEnabled, @@ -47,7 +39,6 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{} "LocalDatacenter": cfg.Datacenter, "PrimaryDatacenter": cfg.PrimaryDatacenter, "PeeringEnabled": cfg.PeeringEnabled, - "V2CatalogEnabled": v2CatalogEnabled, } // Also inject additional provider scripts if needed, otherwise strip the diff --git a/agent/uiserver/uiserver.go b/agent/uiserver/uiserver.go index 0cd20c5fda0b4..8cabb8e3e0cd9 100644 --- a/agent/uiserver/uiserver.go +++ b/agent/uiserver/uiserver.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver diff --git a/agent/uiserver/uiserver_test.go b/agent/uiserver/uiserver_test.go index d86baf1f48f56..c42792fe320bf 100644 --- a/agent/uiserver/uiserver_test.go +++ b/agent/uiserver/uiserver_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 package uiserver @@ -51,8 +51,7 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - }, - "V2CatalogEnabled": false + } }`, }, { @@ -91,8 +90,7 @@ func TestUIServerIndex(t *testing.T) { }, "metrics_proxy_enabled": false, "dashboard_url_templates": null - }, - "V2CatalogEnabled": false + } }`, }, { @@ -113,8 +111,7 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - }, - "V2CatalogEnabled": false + } }`, }, { @@ -135,30 +132,7 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - }, - "V2CatalogEnabled": false - }`, - }, - { - name: "v2 catalog enabled", - cfg: basicUIEnabledConfig(withV2CatalogEnabled()), - path: "/", - wantStatus: http.StatusOK, - wantContains: []string{" **Note**: This information is valid as of Consul 1.17 but some portions may change in future releases. - -## Controller Basics - -A controller consists of several parts: - -1. 
**The watched type** - This is the main type a controller is watching and reconciling. -2. **Additional watched types** - These are additional types a controller may care about in addition to the main watched type. -3. **Additional custom watches** - These are the watches for things that aren't resources in Consul. -4. **Reconciler** - This is the instance that's responsible for reconciling requests whenever there's an event for the main watched type or for any of the watched types. -5. **Initializer** - This is responsible for anything that needs to be executed when the controller is started. - -A basic controller setup could look like this: - -```go -func barController() controller.Controller { - return controller.NewController("bar", pbexample.BarType). - WithReconciler(barReconciler{}) -} -``` - -barReconciler needs to implement the `Reconcile` method of the `Reconciler` interface. -It's important to note that the `Reconcile` method only gets the request with the `ID` of the main -watched resource and so it's up to the reconcile implementation to fetch the resource and any relevant information needed -to perform the reconciliation. The most basic reconciler could look as follows: - -```go -type barReconciler struct {} - -func (b *barReconciler) Reconcile(ctx context.Context, rt Runtime, req Request) error { - ... -} -``` - -## Watching Additional Resources - -Most of the time, controllers will need to watch more resources in addition to the main watched type. -To set up an additional watch, the main thing we need to figure out is how to map additional watched resource to the main -watched resource. Controller-runtime allows us to implement a mapper function that can take the additional watched resource -as the input and produce reconcile `Requests` for our main watched type. - -To figure out how to map the two resources together, we need to think about the relationship between the two resources. - -There are several common relationship types between resources that are being used currently: -1. Name-alignment: this relationship means that resources are named the same and live in the same tenancy, but have different data. Examples: `Service` and `ServiceEndpoints`, `Workload` and `ProxyStateTemplate`. -2. Selector: this relationship happens when one resource selects another by name or name prefix. Examples: `Service` and `Workload`, `ProxyConfiguration` and `Workload`. -3. Owner: in this relationship, one resource is the owner of another resource. Examples: `Service` and `ServiceEndpoints`, `HealthStatus` and `Workload`. -4. Arbitrary reference: in this relationship, one resource may reference another by some sort of reference. This reference could be a single string in the resource data or a more composite reference containing name, tenancy, and type. Examples: `Workload` and `WorkloadIdentity`, `HTTPRoute` and `Service`. - -Note that it's possible for the two watched resources to have more than one relationship type simultaneously. -For example, `FailoverPolicy` type is name-aligned with a service to which it applies, however, it also contains -references to destination services, and for a controller that reconciles `FailoverPolicy` and watches `Service` -we need to account for both type 1 and type 4 relationship whenever we get an event for a `Service`. - -### Simple Mappers - -Let's look at some simple mapping examples. 
- -#### Name-aligned resources -If our resources only have a name-aligned relationship, we can map them with a built-in function: - -```go -func barController() controller.Controller { - return controller.NewController("bar", pbexample.BarType). - WithWatch(pbexample.FooType, controller.ReplaceType(pbexample.BarType)). - WithReconciler(barReconciler{}) -} -``` - -Here, all we need to do is replace the type of the `Foo` resource whenever we get an event for it. - -#### Owned resources - -Let's say our `Foo` resource owns `Bar` resources, where any `Foo` resource can own multiple `Bar` resources. -In this case, whenever we see a new event for `Foo`, all we need to do is get all `Bar` resources that `Foo` currently owns. -For this, we can also use a built-in function to set up our watch: - -```go -func MapOwned(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { - resp, err := rt.Client.ListByOwner(ctx, &pbresource.ListByOwnerRequest{Owner: res.Id}) - if err != nil { - return nil, err - } - - var result []controller.Request - for _, r := range resp.Resources { - result = append(result, controller.Request{ID: r.Id}) - } - - return result, nil -} - -func barController() controller.Controller { - return controller.NewController("bar", pbexample.BarType). - WithWatch(pbexample.FooType, MapOwned). - WithReconciler(barReconciler{}) -} -``` - -### Advanced Mappers and Caches - -For selector or arbitrary reference relationships, the mapping that we choose may need to be more advanced. - -#### Naive mapper implementation - -Let's first consider what a naive mapping function could look like in this case. Let's say that the `Bar` resource -references a `Foo` resource by name in its data. Now to watch and map `Foo` resources, we need to be able to find all relevant `Bar` resources -whenever we get an event for a `Foo` resource. - -```go -func MapFoo(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { - resp, err := rt.Client.List(ctx, &pbresource.ListRequest{Type: pbexample.BarType, Tenancy: res.Id.Tenancy}) - if err != nil { - return nil, err - } - - var result []controller.Request - for _, r := range resp.Resources { - decodedResource, err := resource.Decode[*pbexample.Bar](r) - if err != nil { - return nil, err - } - - // Only add Bar resources that match Foo by name. - if decodedResource.GetData().GetFooName() == res.Id.Name { - result = append(result, controller.Request{ID: r.Id}) - } - } - - return result, nil -} -``` - -This approach is fine for cases when the number of `Bar` resources in a cluster is relatively small. If it's not, -then we'd be doing a large `O(N)` search on each `Foo` event which could be too expensive. - -#### Caching Mappers - -For cases when `N` is too large, we'd want to use a caching layer to help us make lookups more efficient so that they -don't require an `O(N)` search of potentially all cluster resources. - -The controller runtime contains a controller cache and the facilities to keep the cache up to date in response to watches. Additionally there are dependency mappers provided for querying the cache. - -_While it is possible to not use the builtin cache and manage state in dependency mappers yourself, this can get quite complex and reasoning about the correct times to track and untrack relationships is tricky to get right.
Usage of the cache is therefore the advised approach._ - -At a high level, the controller author provides the indexes to track for each watched type and can then query those indexes in the future. The querying can occur during both dependency mapping and during resource reconciliation. - -The following example shows how to configure the "bar" controller to re-reconcile a Bar resource whenever a Foo resource that it references is changed: - -```go -func fooReferenceFromBar(r *resource.DecodedResource[*pbexample.Bar]) (bool, []byte, error) { - idx := index.IndexFromRefOrID(&pbresource.ID{ - Type: pbexample.FooType, - Tenancy: r.Id.Tenancy, - Name: r.Data.GetFooName(), - }) - - return true, idx, nil -} - -func barController() controller.Controller { - fooIndex := indexers.DecodedSingleIndexer( - "foo", - index.ReferenceOrIDFromArgs, - fooReferenceFromBar, - ) - - return controller.NewController("bar", pbexample.BarType, fooIndex). - WithWatch( - pbexample.FooType, - dependency.CacheListMapper(pbexample.BarType, fooIndex.Name()), - ). - WithReconciler(barReconciler{}) -} -``` - -The controller will now reconcile Bar type resources whenever the Foo type resources they reference are updated. No further tracking is necessary as changes to all Bar types will automatically update the cache. - -One limitation of the cache is that it only has knowledge about the current state of resources. That specifically means that the previous state is forgotten once the cache observes a write. This can be problematic when you want to reconcile a resource to no longer take into account something that previously referenced it. - -Let's say there are two types: `Baz` and `ComputedBaz`, and a controller that will aggregate all `Baz` resources with some value into a single `ComputedBaz` object. When -a `Baz` resource gets updated to no longer have a value, it should not be represented in the `ComputedBaz` resource. The typical way to work around this is to: - -1. Store references to the resources that were used during reconciliation within the computed/reconciled resource. For types computed by controllers and not expected to be written directly by users a `bound_references` field should be added to the top level resource types message. For other user manageable types the references may need to be stored within the Status field. - -2. Add a cache index to the watch of the computed type (usually the controller's main managed type). This index can use one of the indexers specified within the [`internal/controller/cache/indexers`](../../../internal/controller/cache/indexers/) package. That package contains some builtin functionality around reference indexing. - -3. Update the dependency mappers to query the cache index *in addition to* looking at the current state of the dependent resource. In our example above the `Baz` dependency mapper could use the [`MultiMapper`] to combine querying the cache for `Baz` types that currently should be associated with a `ComputedBaz` and querying the index added in step 2 for previous references. - -#### Footgun: Needing Bound References - -When an interior (mutable) foreign key pointer on watched data is used to -determine the resource's applicability in a dependency mapper, it is subject -to the "orphaned computed resource" problem. - -(An example of this would be a ParentRef on an xRoute, or the Destination field -of a TrafficPermission.)
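To make the failure mode concrete, here is a minimal sketch of such a mapper, reusing the hypothetical `pbexample` types and `FooName` field from the snippets above (none of these names are a real API in the codebase). Assume a controller that computes one output resource per `Foo` from the `Bar` resources currently naming it; every request this mapper produces is derived from the watched `Bar`'s *current* pointer, so nothing ever maps back to whatever the pointer used to reference.

```go
// Hedged sketch: a dependency mapper whose output depends entirely on the
// mutable FooName pointer held in the watched Bar's current data.
func MapBarByCurrentFooName(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	bar, err := resource.Decode[*pbexample.Bar](res)
	if err != nil {
		return nil, err
	}

	// Only the NEW value of FooName is visible here. If it was just edited
	// from "foo-1" to "foo-2", only foo-2's output gets reconciled; the
	// output previously computed for foo-1 still includes this Bar.
	return []controller.Request{{
		ID: &pbresource.ID{
			Type:    pbexample.FooType,
			Tenancy: res.Id.Tenancy,
			Name:    bar.Data.GetFooName(),
		},
	}}, nil
}
```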
- -When you edit the mutable pointer to point elsewhere, the DependencyMapper will -only witness the NEW value and will trigger reconciles for things derived from -the NEW pointer, but side effects from a prior reconcile using the OLD pointer -will be orphaned until some other event triggers that reconcile (if ever). - -This applies equally to all varieties of controller: - -- creates computed resources -- only updates status conditions on existing resources -- has other external side effects (xDS controller writes envoy config over a stream) - -To solve this we need to collect the list of bound references that were -"ingredients" into a computed resource's output and persist them on the newly -written resource. Then we load them up and index them such that we can use them -to AUGMENT a mapper event with additional maps using the OLD data as well. - -We have only actively worked to solve this for the computed resource flavor of -controller: - -1. The top level of the resource data protobuf needs a - `BoundReferences []*pbresource.Reference` field. - -2. Use a `*resource.BoundReferenceCollector` to capture any resource during - `Reconcile` that directly contributes to the final output resource data - payload. - -3. Call `brc.List()` on the above and set it to the `BoundReferences` field on - the computed resource before persisting. - -4. Use `indexers.BoundRefsIndex` to index this field on the primary type of the - controller. - -5. Create `boundRefsMapper := dependency.CacheListMapper(ZZZ, boundRefsIndex.Name())` - -6. For each watched type, wrap its DependencyMapper with - `dependency.MultiMapper(boundRefsMapper, ZZZ)` - -7. That's it. - -This will cause each reconcile to index the prior list of inputs and augment -the results of future mapper events with historical references. - -### Custom Watches - -In some cases, we may want to trigger reconciles for events that aren't generated from CRUD operations on resources, for example -when Envoy proxy connects or disconnects to a server. Controller-runtime allows us to setup watches from -events that come from a custom event channel. Please see [xds-controller](https://github.com/hashicorp/consul/blob/ecfeb7aac51df8730064d869bb1f2c633a531522/internal/mesh/internal/controllers/xds/controller.go#L40-L41) for examples of custom watches. - -## Statuses - -In many cases, controllers would need to update statuses on resources to let the user know about the successful or unsuccessful -state of a resource. - -These are the guidelines that we recommend for statuses: - -* While status conditions is a list, the Condition type should be treated as a key in a map, meaning a resource should not have two status conditions with the same type. -* Controllers need to both update successful and unsuccessful conditions states. This is because we need to make sure that we clear any failed status conditions. -* Status conditions should be named such that the `True` state is a successful state and `False` state is a failed state. - -## Best Practices - -Below is a list of controller best practices that we've learned so far. Many of them are inspired by [kubebuilder](https://book.kubebuilder.io/reference/good-practices). - -* Avoid monolithic controllers as much as possible. A single controller should only manage a single resource to avoid complexity and race conditions. -* If using cached mappers, aim to write (update or delete entries) to mappers in the `Reconcile` method and read from them in the mapper functions used by watches. 
-* Fetch all data in the `Reconcile` method and avoid caching it from the mapper functions. This ensures that we get the latest data for each reconciliation. diff --git a/docs/v2-architecture/controller-architecture/testing.md b/docs/v2-architecture/controller-architecture/testing.md deleted file mode 100644 index 99084b37cfaf3..0000000000000 --- a/docs/v2-architecture/controller-architecture/testing.md +++ /dev/null @@ -1,221 +0,0 @@ -# Controller Testing - -For every controller we want to enable 3 types of testing. - -1. Unit Tests - These should live alongside the controller and utilize mocks and the controller.TestController. Where possible split out controller functionality so that other functions can be independently tested. -2. Lightweight integration tests - These should live in an internal//test package. These tests utilize the in-memory resource service and the standard controller manager. There are two types of tests that should be created. - * Lifecycle Integration Tests - These go step by step to modify resources and check what the controller did. They are meant to go through the lifecycle of resources and how they are reconciled. Verifications are typically intermingled with resource updates. - * One-Shot Integration Tests - These tests publish a bunch of resources and then perform all the verifications. These mainly are focused on the controller eventually converging given all the resources thrown at it and aren't as concerned with any intermediate states resources go through. -3. Container based integration tests - These tests live along with our other container based integration tests. They utilize a full multi-node cluster (and sometimes client agents). There are 3 types of tests that can be created here: - * Lifecycle Integration Tests - These are the same as for the lighweight integration tests. - * One-shot IntegrationTests - These are the same as for the lightweight integration tests. - * Upgrade Tests - These are a special form of One-shot Integration tests where the cluster is brought up with some original version, data is pushed in, an upgrade is done and then we verify the consistency of the data post-upgrade. - - -Between the lightweight and container based integration tests there is a lot of duplication in what is being tested. For this reason these integration test bodies should be defined as exported functions within the apigroups test package. The container based tests can then import those packages and invoke the same functionality with minimal overhead. - -See the [internal/catalog/catalogtest](internal/catalog/catalogtest) package for an example. - -For one-shot integration tests, functions to do the resource publishing should be split from functions to perform the verifications. This allows upgrade tests to publish the resources once pre-upgrade and then validate that their correctness post-upgrade without requiring rewriting them. - -Sometimes it may also be a good idea to export functions in the test packages for running a specific controllers integration tests. This is a good idea when the controller will use a different version of a dependency in Consul Enterprise to allow for the enterprise implementations package to invoke the integration tests after setting up the controller with its injected dependency. - -## Unit Test Template - -These tests live alongside controller source. 
-
-```go
-package foo
-
-import (
-    "context"
-    "testing"
-
-    "github.com/stretchr/testify/mock"
-    "github.com/stretchr/testify/require"
-    "github.com/stretchr/testify/suite"
-
-    "github.com/hashicorp/consul/internal/controller"
-    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
-    "github.com/hashicorp/consul/proto-public/pbresource"
-    "github.com/hashicorp/consul/sdk/testutil"
-    // Also import this API group's types package (used below as types) and the
-    // resource service test builder (used below as svctest).
-)
-
-func TestReconcile(t *testing.T) {
-    rtest.RunWithTenancies(func(tenancy *pbresource.Tenancy) {
-        suite.Run(t, &reconcileSuite{tenancy: tenancy})
-    })
-}
-
-type reconcileSuite struct {
-    suite.Suite
-
-    tenancy *pbresource.Tenancy
-
-    ctx    context.Context
-    ctl    *controller.TestController
-    client *rtest.Client
-
-    // Mock objects needed for testing
-}
-
-func (suite *reconcileSuite) SetupTest() {
-    suite.ctx = testutil.TestContext(suite.T())
-
-    // Alternatively it is sometimes useful to use a mock resource service. For that
-    // you can use github.com/hashicorp/consul/grpcmocks.NewResourceServiceClient
-    // to create the client.
-    client := svctest.NewResourceServiceBuilder().
-        // Register this API group's types. Also register any other
-        // types this controller depends on.
-        WithRegisterFns(types.Register).
-        WithTenancies(suite.tenancy).
-        Run(suite.T())
-
-    // Build any mock objects or other dependencies of the controller here.
-
-    // Build the TestController
-    suite.ctl = controller.NewTestController(Controller(), client)
-    suite.client = rtest.NewClient(suite.ctl.Runtime().Client)
-}
-
-// Implement tests on the suite as needed.
-func (suite *reconcileSuite) TestSomething() {
-    // Setup Mock expectations
-
-    // Push resources into the resource service as needed.
-
-    // Issue the Reconcile call
-    suite.ctl.Reconcile(suite.ctx, controller.Request{})
-}
-```
-
-## Integration Testing Templates
-
-These tests should live in `internal/<api group>/<api group>test`. For these examples, assume the API group under test is named `foo` and the latest API group version is v2.
-
-### `run_test.go`
-
-This file is how `go test` knows to execute the tests. These integration tests should
-be executed against an in-memory resource service with the standard controller manager.
-
-```go
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: BUSL-1.1
-
-package footest
-
-import (
-    "testing"
-
-    "github.com/hashicorp/consul/internal/foo"
-    "github.com/hashicorp/consul/internal/controller/controllertest"
-    "github.com/hashicorp/consul/internal/resource/reaper"
-    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
-    "github.com/hashicorp/consul/proto-public/pbresource"
-    // Also import this API group's types package (used below as types).
-)
-
-var (
-    // This makes the CLI options available to control timing delays of requests. The
-    // randomized timings help to build confidence that regardless of resource writes
-    // occurring in quick succession, the controller under test will eventually converge
-    // on its steady state.
-    clientOpts = rtest.ConfigureTestCLIFlags()
-)
-
-func runInMemResourceServiceAndControllers(t *testing.T) pbresource.ResourceServiceClient {
-    t.Helper()
-
-    return controllertest.NewControllerTestBuilder().
-        // Register your types for the API group and any others that these tests will depend on.
-        WithResourceRegisterFns(types.Register).
-        WithControllerRegisterFns(
-            reaper.RegisterControllers,
-            foo.RegisterControllers,
-        ).Run(t)
-}
-
-// The basic integration test should operate mostly in a one-shot manner where resources
-// are published and then verifications are performed.
-func TestControllers_Integration(t *testing.T) {
-    client := runInMemResourceServiceAndControllers(t)
-    RunFooV2IntegrationTest(t, client, clientOpts.ClientOptions(t)...)
-}
-
-// The lifecycle integration test is typically more complex and deals with changing
-// some values over time to cause the controllers to do something differently.
-func TestControllers_Lifecycle(t *testing.T) {
-    client := runInMemResourceServiceAndControllers(t)
-    RunFooV2LifecycleTest(t, client, clientOpts.ClientOptions(t)...)
-}
-```
-
-### `test_integration_v2.go`
-
-```go
-package footest
-
-import (
-    "embed"
-    "testing"
-
-    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
-    "github.com/hashicorp/consul/proto-public/pbresource"
-)
-
-var (
-    //go:embed integration_test_data
-    testData embed.FS
-)
-
-// Execute the full integration test.
-func RunFooV2IntegrationTest(t *testing.T, client pbresource.ResourceServiceClient, opts ...rtest.ClientOption) {
-    t.Helper()
-
-    PublishFooV2IntegrationTestData(t, client, opts...)
-    VerifyFooV2IntegrationTestResults(t, client)
-}
-
-// PublishFooV2IntegrationTestData publishes all the data that needs to exist in the resource service
-// for the controllers to converge on the desired state.
-func PublishFooV2IntegrationTestData(t *testing.T, client pbresource.ResourceServiceClient, opts ...rtest.ClientOption) {
-    t.Helper()
-
-    c := rtest.NewClient(client, opts...)
-
-    // Publishing resources manually is an option but alternatively you can store the resources on disk
-    // and use go:embed declarations to embed the whole test data filesystem into the test binary.
-    resources := rtest.ParseResourcesFromFilesystem(t, testData, "integration_test_data/v2")
-    c.PublishResources(t, resources)
-}
-
-func VerifyFooV2IntegrationTestResults(t *testing.T, client pbresource.ResourceServiceClient) {
-    t.Helper()
-
-    c := rtest.NewClient(client)
-
-    // Perform verifications here. All verifications should be retryable except in very exceptional circumstances.
-    // This could be in a retry.Run block or could be retried by using one of the WaitFor* methods on the rtest.Client.
-    // Having them be retryable will prevent flakes especially when the verifications are run in the context of
-    // a multi-server cluster where a raft follower hasn't yet observed some change.
-}
-```
-
-### `test_lifecycle_v2.go`
-
-```go
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: BUSL-1.1
-
-package footest
-
-import (
-    "testing"
-
-    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
-    "github.com/hashicorp/consul/proto-public/pbresource"
-)
-
-func RunFooV2LifecycleTest(t *testing.T, client pbresource.ResourceServiceClient, opts ...rtest.ClientOption) {
-    t.Helper()
-
-    // execute tests.
-}
-```
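As an illustration of the retryable-verification guidance in `test_integration_v2.go` above, here is a minimal sketch that is not part of the original templates: it re-reads a resource inside a `retry.Run` block until it exists. The helper name `waitForResource` is hypothetical; the sketch assumes the `retry` helpers from `github.com/hashicorp/consul/sdk/testutil/retry` and the resource service `Read` RPC.

```go
package footest

import (
    "context"
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/sdk/testutil/retry"
)

// waitForResource is a hypothetical helper showing the retryable-verification
// pattern: keep re-reading the resource until it exists (or the retryer gives up).
func waitForResource(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID) *pbresource.Resource {
    t.Helper()

    var res *pbresource.Resource
    retry.Run(t, func(r *retry.R) {
        rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id})
        require.NoError(r, err)
        res = rsp.GetResource()
    })
    return res
}
```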
diff --git a/docs/v2-architecture/service-mesh/README.md b/docs/v2-architecture/service-mesh/README.md
deleted file mode 100644
index 4d0b6e9ff279d..0000000000000
--- a/docs/v2-architecture/service-mesh/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# V2 Service Mesh Architecture
-
-In the Consul 1.16 and 1.17 releases, Consul's service mesh has been rewritten to use the controller architecture and the
-resource APIs.
-
-At a high level, the service mesh consists of resources and controllers living in three groups: `catalog`, `mesh`,
-and `auth`.
-
-![controllers diagram](controllers.png)
-
-The controllers in each group are responsible for producing an output that may then be used by other controllers.
-
--> **Note:** This diagram is valid as of Consul 1.17. It may change in future releases.
-
-## Catalog controllers
-
-Catalog controllers are responsible for reconciling resources in the `catalog` API group.
-
-1. **FailoverPolicy** controller validates that a `FailoverPolicy` resource has valid service references and updates the
-   status of the `FailoverPolicy` resource with the result.
-2. **WorkloadHealth** controller takes in workloads and any relevant health statuses and updates the status of a
-   workload with the combined health status.
-3. **NodeHealth** controller takes in nodes and any relevant health statuses and updates the status of a node with the
-   combined health status.
-4. **ServiceEndpoints** controller generates a `ServiceEndpoints` object that is name-aligned with a service and
-   contains the workload addresses and ports that constitute the service.
-
-## Mesh Controllers
-
-1. **ProxyConfiguration** controller generates a `ComputedProxyConfiguration` resource that is name-aligned with a
-   workload. `ComputedProxyConfiguration` contains all merged `ProxyConfiguration` resources that apply to a specific
-   workload.
-2. **Routes** controller generates a `ComputedRoutes` resource that is name-aligned with a service. It contains merged
-   configuration from all xRoutes objects as well as `FailoverPolicy` and `DestinationPolicy`.
-3. **ExplicitDestinations** controller generates a `ComputedExplicitDestinations` resource that is name-aligned with a
-   workload. It contains merged `Destinations` resources that apply to a specific workload.
-4. **SidecarProxy** controller takes in the results of the previous three controllers as well as some user-provided
-   resources and generates a `ProxyStateTemplate` resource, which serves as the representation of Envoy configuration for
-   sidecar proxies.
-5. **XDSController** takes in `ProxyStateTemplate` resources, fills in missing endpoint references as well as
-   certificates and CA roots, and sends them to another component that forwards this information to Envoy.
-
-## Auth Controllers
-
-1. **TrafficPermissions** controller generates a `ComputedTrafficPermissions` resource that is name-aligned
-   with `WorkloadIdentity`. This computed resource contains all traffic permissions that apply to a specific workload
-   identity.
\ No newline at end of file
diff --git a/docs/v2-architecture/service-mesh/controllers.png b/docs/v2-architecture/service-mesh/controllers.png
deleted file mode 100644
index 76efac1d059cd158a1bc462237cad8322c54f034..0000000000000000000000000000000000000000
GIT binary patch
(binary image data for the deleted controllers.png diagram omitted)
zH*LgJoreGFT23&@xS)h>D^>I^nU%)Xu?V4NPTde_XQ014y5QTgp9?X5eT?T%C7p-1 z`41+=4X&t($3~}r1nWw3|F5@q9_UoZMTwe7=9S;G&Jab|Cc%x4Zir%?#jy`7F=~sw990>pe-o!^mGA7XU0tV&2KBwH0Kj^H1_`Q|kQL&^ zD$cKkttu02K=18x43?4gT9iB6Kb&Jh9GIwk*qFiwur8r0@{JGh!^YOd-#k}v7#asb zS6&zYkk?s4*A+65fdd&XIT=8~P)y2zAT_P6Z;8xFB9h$aaUzloOS^($eDK&8ia{%~ zQ8n*nZrq%tYX&Q@n{b7sEsL`gXJ)Y3>^TOdp@)B1lIJ_%hmmnNEbV-IqH*wCv=>a$ z*e;kkEr7B?>UA`{{;>BbImumN#@ zho$T}i1XosYs{@y_zCMXDbF9I;i%=3%w-$XXgfIui4S7zxr{IB#l ziw(SCNS`lgq|9IJpjmCrPwqRRL10at^!YvT6nsGgXSULXMkVDB^A@yW*Q+6j6~8mlKmfb+Fka+1T=@s|y7Ht{y_C0*f36EK)eQaSV`l z{=G;T%{0!c`)cU)fv!j4`*V2pB- z`CC!PkTajeV12Vz*q8h2_RZCptwB9?p$lj6D3PyG9~ViiJ@|?=Pl;9TrHGz|In42T9{p_lv{{F`Je!X&A=rmqm}Z>x(a1 zzuI}ruJNUta5q|lrjZ#SBE#b;o{g$B46FLGS5m?jW}ZSKL;cyLNLEVKwzklk-ZX#H z&6YTjon}sw8d|X^ROcrs`g9mLCq`d<*{Qe}D12N#U8a8|V6`DF=x!S zYi_T&Ui)#(E9N`s@4;WPH%9Ecj)LNtBQ!5jNT(P>gyG&~+=PW2yW&8-t!|ARh80Wq%IUUig>5?@D?SVm z0Pp$Jn|^;6@)1f}CYk_#2}*Wh{78BK4lW`%?k`;$I$cmWdzndV{57>!M}#!ON4eVse3Y)q_-H-vr6s8)_Xa2Jo1hJ zyM@$ufMOk?c{Rny9XYnd2`dG6(KDdtH#hye=>-@BIG%N0?#%80$2OQC){vx{Qvwv; z{4M93-@m{I%NpN&H>%sQM(zv;XmnY*u&fa?)|@YgR-&{}e@g$i=|-Uj=9$zapU39Q zn`Zp}_Q5ZIL@+?ShQ=Az%>g7^CS(XHogbwz;(rp&-#bN6p1?-&DU!z{2zW#rWaJ&# zKK<-C|NGMZ=f9bN1hC&GS`eJ$uo?G7**_s3S2-w&V%^oE_Eu&lr|3Znh|aR+p#L2hmQ$HV`Jf1L0I-jN68V}O$^0DXZ4 z|M!Cbnxy|*!T-Tp{xZA&GtvL%eZQyb|9tR&!Kwcj$c%!{J=*dBgfmcr1agKv(yhur z`Psj|6FmU3PS9FvsdL^QGjQ( z&P_+uWCJi){4Y8AcYM)>#u-}P{V1yg{55>}PD(r6FdeDYDE%Y}ymvtTaI=`3^RU&f zvDI$QU18(=y>JzRu5u2lwF#L!=P80L3JRL_JF9rFtD`ab!dr8)9*5Cx`%x~+KLa!n z{9FE);2-ECzs`=)6#1Nv+VV~l$wdgV1=XC+PWztkB36(H){4Q|(7Os3Cc)-SXwRzR z&O1$Od<&87Wzvv?|2$6GeMhJ&&~Bq?Eqcgj7>q*85YI>F)=|GVo^QCx zHFnSfOM9rUq@v$$^Z&CB5Wo$@3&MaC1e8Qe7-p|UU~H8YTfX6wdm?Z^29j9M zzu~Tu%dzG-e$zx_QIdXn<}ac1*b_1;9UaX~!p8Pq`BO#N*JdQ$&pC*L{TN@Mbx2M# zNG?`!GERQx%#lwnRi9pJFNhvfER;%wR1vBE6t6!H{PJOPKLOSMV+wm-_?5~3Un)%G zyBbhFr2D91bN2$yq=J+{+b~d=N^#4fB*q6D){VaEYw@t4;_-)p{ZHIW%?+1N=YTzyMZ9Kfb(w z%lBhx3FEs~$9CQ%*Ekfo7#}#ec#j+5nUCY&a7-|Zv>Ban#ST;L2Jk+I!dcPS8;*pV?#3X>>(Ea_#b57lmd_yc4y(D ztH3-T{|{0o!s#1yka#^$yX-JZN6U4_}XOd|_9CCQN3j z`8))ZOT#i+rC4qMOpaM0%WaI1k(1mH_BHR{D)8fYr={hPs87*^cLKG*HsUNl^M(PQK4?(XyFMEgLG07Ps)q6DO8lmmc;htfY% zDp}Dz4?tDW@}?_2?Sd`r2C{Z{IJdG;sbNi)8dRtjuI()usxjq`p7NvJO(hi&qwisZ%&Gc&h9y5iul< z-3mmGUC>f^#ClGNy^kXntmXGxG5##lw1CS@m=UcK2mp^nAZviCH%!zB!y}3f2 za^cI=Er1Ps+1lOhQe+7=l#Yv6E&=x%pM*`AESWe~Xp2XWZT#FknpM5EGGSwX)1p96 zZ6Cb1^j3zyjf|N?`f!UG)R3!A?Ql0gu-XcbId^LJ{u>5r%d$nJ(?}bO6{m**B+?Nk zGVK~oNIK%Tl)sy8dnWvckk{EuPfkt zVfQ;4_<+vgbaP$Gy$-LG_rwmH(SVdq>I){NuQw?$dwEGS6LsXoG{yWv^XlSBLau8I zL0gN2k7^$Vs%q8-gp=aUBaTvhHgr$+X{I6wIPlbYS@*Uvs!Hn8_atjij}cX=%7IO0 zUYk#$oECyY=5k@Ve-;m6ae)-huqM7B0~W0R1^u1D2?n5`4r%nfdq>bHx|(*yhJ5_lE=OM(_u29C{h znx4xXfCbcd-1PP)xCaKOBV`6{SFwZYAYd~deRA$HmC$hq0xgrLcjV+ryl*I{gt1M* z%pic)L0a~;&l8jIi*{NYo1!2GV-;`nl@JL`TXLTF_ zkDT`ar?)_P?Z4@wnGRgcNJ_j!vCAm z0LdPeo?iODH=zkXb*+^a^`#-G>~g;?wM?HOlzo>e_t}PP2z2SV1x*qsST^iG%aiWr zs)((I&do%|1H@11z5u%D+;f(2qU(l-Ld3@{Eg{RT}r1z3QSxEX!q zV#jE)>xv)lO)WL1z$@fxmx?D<8_hOi4AAC0zOs;>08_BkEvXH zd`8c6z0>skQX{5eiQmR| z7h2!B-=FDd%ugvH0Of2ow@Qd&DZpJnM6sK@x9^pc#%&gxzGc-T+vn0H_tG;x7x^cK zI=lN=sDGTXRXyJw+)xm5l^oE@VfBqu2ecpw5F+htYDl1cgY~pzZkTVL*c;96DvRt& z#*B>AReB%oTncHIjR)^mgv9yNca6w)?6=?&O>X^vw5LJ)0|v@OCGJ89w#`q!5vFDw zdM>n8TW{Ux2;pnGf2vWf=0~ z4Da-s5bm=2=8xHF3!;*YJo`vp_Nv;?4!EM96as)!cT40NLtR}#)Od{Jv1fXX-5!_d zi9-}RCSiP(JzwhSUhG-8g3hX&m8a_%`P8^ZhJ5Zcbv=qP1nX&1S$AP>9nkK5xp`Bv+t&Z@Wj4N^ulx^cNo zM6?*MZt8LS+2L`J&gEm>8RWAqztpWuNOTecZ}&6-|7_Hez&b2H%21XB>;v?yh1Gp* zhyxk`B7MHcVyHEegWB-c%F9JKe%Xc(BT2F8?Hm7-dkTVUk7T-f;C+3mR)oJ3jPy2= 
ztVzPmic;Ws7>-SH9QEE3_}X_FMw5&zF40uuZHEDrv4T8k3RNvt3!&J+K*R*bx>5c4o+dB#|IMOrW6Idl*`(dIEvX6n)hvNHU+5pb8PC z*-i%+B#2hvzRu9ccs9-NB@8|4u!*_UY~2lhaA53D!tU}u;S%^ehuR0%`0+*g9qzy? zx0do>lq{VcHO6@0Ge726XDDFLjU;!6l4MIM@@up_hGH@P>R7?As1xz4%Xuz8*h9}1 zCOsuG-SWZ~5^g{wG>^=IHeXM4qLQEwfdzA*RN>@xJHH2cwKc=P+U?m$pZuW2GB3tN zhVmA=w&1h?M5%X|-$Qj{KavHl!%qw}Jv~Ing)Bm+Q~k}zpuxRgD2C$Z>R>E#`;A3F ztD3XYe7yn*_ABGw(3R>=VxWD_R6W;eQGzHP_Up*3X`Eln-28PArh^2I@}&oqwW@Tr{NN$SxT-6#@gk&J4pxe`-Tdd$R7T%eP8 z(Zi!4YUt8SR-F}wO_Ae|HO(OQU;e`Pz1=nl=KfXdJQfo)In_p>Jx|Cx9j2}5t}J&+=OTf@hMV+~7-$38L+f1ruv?oC zq$(@w{?@i-x8yr0(I6hEI|FRj?y!Dh-OLoRfdZ)ARyOuRg`*5K+Q!U3y7|Dpzn*A2*EqmDvMql3 zt5@8Pvjz4z#(d=_QDr?mVb)I4Vn+7SDAx18!DXMf4c?|kU#)uetcndFDn4vJ=IG8 zU*Mw){asJTmjg2%m-#08zRejrWf7X3Y-T_)SAJrue zJ%B`Pi%zzHoPFCGMSo@$9IrIf~3w#Av2F>56s(x!d-E}6dUQW zgCKl#0mISYGSQAe!ORy7Nti@D|#NcZrnxPv!3uY@aH{2!MGW(EcGy&agr? z*>rdP*`L|YE;FEV2n*Gnb_n~GpO4B&XT;9VZHK<}BnRos8~o`|Mt*2W)`~y21fhZ% zC?qD|fO5eEBJ=rNbMAz7Bud8`Dm0#RXT1Y*7@4eQDj!Z2JA#RgI34soE4{Br(NSmX z=qy$$S~&INmSIw6oSnHTX7l(0WB+oMSy+4IGStmliETIP*f?n1dP-S#juLP9s}y56ctE#`v$&%*P9I^b0I zQZaiCz+ngtY(npz_I{ zZT7xmLC}?L@KT(j)xe!aCO8H*i#D5X{5DPmWD(H!UaclqL+6Sg1Wbck5urh z2DyWX?-}pAHrKKSv_!|)HK4Z>5yKm1*V`))Vo47M3HkXExTt013Fw}V|LR@&onCY$ z0G`!YUh2LJoUi(&j!EpV=IYS%jmCUEoBV+l9F8nxe()%@D6{OFsxD=iB&C{{a)k5` zQ#JP)A_$_=HVK8A@(%`5p>4?tN>am~oM}aGhP0`H9@uw4)FCymGAWiHrTO*=Q_TR_ z2q}}`G(0(XJ^(`1EE!mQ87CMy>@VKFW*9%p`ALRmjNlv9s==k1{@=PG%6}9E18s^2 zG0231l^G`S<*1p^P3okK%VC^JTo!>rF3AaI=1RLbkV(XsORXx{0KN3leY)YuMo-Ye zCgV=S{JWnVdHq@NDu~Cl(T0XQtKxjtfI&WrVN_EB_D)OA-O$+Qzg%dXZEH%G>nQBP z#&vFhI+3J`X;b#?5tN^nX3?EXX-YyHtJwT)bNu7)IRt+a;Q`SrXsbBYQV!_D(L#pT z1#*S8SR~Y@@kf=g6Lg#Ox4eiSF1#uyH^RFh0>~gTXPg_4CaC$P^kQHKKS&fiS6p1U z@%9y#m&@_nlpd$Ck@i;q_AX^Y*_Ik z(_$Rg;#G7eR%**1)ssFtd&1#+DIiK15PEvXl#|fD>^#+q1qU4hEVEB%>UlK~CW3(e z%AiTUP5Z@w*Y2?)lT8#l6i9;s(8cKlyjQD8r7>E~n9?*A@IBKPhdkyLX&MzdgRF*!@#vw7ty$bjDz=d-hyh%ebEVYE1oArG5rNSgBaJP!v$;U}-Q=@-W}IWvM6%qAGA>pr9VWPI;cO z`Az_zJKiiYD1g`)BkK}p`#qX4xy|)-B&8+I`$k~TYg&mSOS5mo?1eqU0AJ}E4u^B` zMtk`y-s)4KMh{#Qf=>(z3`_Xm%&H*C3tvF3ex7b-tsr}EbYFoeD&a-Zez+fkGj=>i zKYG1xX_*AsmG60}$Bcm&CSti59qSJ2;a|Ov0b%+pNXnJnZpll7V{X zGO0(Iy-Ed)&7qobV-#tVqy7W7D%UXbY)wUu&93IH=@)-%xjd&Wf+^d$R}Rw#$*yRs z1*t9mWHWr<6x3PPw0LRA-vBqH1xT2z$85POGue1T^IE@VYKqHMv%#Z*8tzn{cn`Cw zT*KsUhV@>lnyI1%%3`tr*5=Q523}oI5qOIBo7r1DRq*4&w8-t~pjJUBDU6C=AxX+@ zR|BEl7ID@e#pdjo=U>?>?coNYQ#C_=Na50OA;F-o!eax@y@2&p z8`;C*xJvtP$TqZQEHm^^Euo%tlHdi&TgrC@Zs;TZUY;iDR#HyxF;l&LW}V;g#><3W zl_=b*xg#^Ow}S(8E@tic+S0HrpJl_a=J$&JdtvK=8|9=uVTPWrvG*+&a9Ad0D`BX` zyHQV9idslfQ4)Nk(-w0KVT`b1N@8#UgJ?~MY{;E@8+}-lZ)Uo6H1_lB$@>?4iU#V^ zsm;Epdlb@kk_|S#OdK5|9!??!VJHnup@H}6J~~obUN)97&Nyp%5y9?qt%;==$Z_ay zrGSN2f8r1!Guh}C5RQ#WjI?dx`L-z3aV(!|HUR&o29oQ_q*H(yG0rt$P>F^1yTerT zsB14BoNNETP4ll8Q4j&914EIMU-kp=g@#^r5N|-R!PsfHgtw$KaGllbhyI&l9&DwR zYIG|Z=WObM58ZbHsHmt>9G*9U8EyDY@&r*R^luWCsv&g3t>XX7U9@g)&Vfh&i};!z7Vu362%lBv!|j4Q|jH-Qx&Fx#KYe5Ew7&pHK|$ew!BQwM%>J!f zUH}(Tn};W1PED|QqdO?;oxj-r1FGmj7@Yq-*Z<+Z#b>nZaxRj$qQZag}fc z23d$%M5jk*FQ&0uo&Z`r{rzNmNqoA21RensDv_&dBp6;tBbZw7*p}+w7f0VLKTa+< z(4($eYGmp2(MD;j=?d}tqsPF9jJndw^{sc7kzJKoGH|Tf8^LHmsQI89{QD0TRp1V$ z;;@U0Vwz4tb=BS(FJ35JHiVH?%wj<;43sn)0(0{(63UeFU3He~8|(Waz#*AktKDea z+}z1e*D4PCrd!kBgbme2va~zi311ywQ$XpCYX4+rBe}>r=bW8nV<8f6YJiKlW;5&V z2@g;MJTrh`7dNU0sm%swXX+&77e|YuzLVqm5l$I!XMCjIm-pB!%N@b%A7x$^bnL=1 z@(4#ts!=Bn+EbPa!qEuoPusnvf~mO?%__(x#^^ZE@KfKDuKK6Dp@ZEvAz^049k{Kz zvd%E8#1KI#Dkv@niEN7SLNK>+`O)(xCN?$-{*-`;i6|%kJ(?f~2??a4V(k7(6M{$E z;;UT@jPVz`sKZ{DJ@(Mh(1QB%gorq7A}34EhUVr}6>hsD)HP9-vQyFR85vxs%tvt6cWYvImp{N)ej;3c~hkBe39D;p5({n9| 
z6yj!FqrT~ck2iwZ1VHt@a>zr}&!gort%L%lsHDiku|MqCM~oU+)ZD~e8KfZJ@*sLz zC|yMD7zbuN2zw(XLFZhB!R%HbDt z09vWDos*!K7q6ec-?_!S4QRh0KmUDibv_7hS!0=ql@5ALZtT`>f>Nbnp1;DzmaB=} zE!9Fvkk_^$ds$yT{7Xv`^aMJ?D)~$Qi|FsT5~^5y>fXX0Q->l*W|&a;#<1eDN(B!s zoXf(lr$Xf!+UYqC`sC#R&ViCzMHm|&JwQ!xxf3B4-XABI^h(&_6TT{Vy;T#Ggb_&{ zig@nYz>JM5%CMR&p6UiMuKd#>Gyw1c!ceJ_myROvMO$TO#)$xM3Ao`^es$kknJ{fR z+NZ{y<6|qjrw_z(cmiQFGb$=SFTRDu8X26eXHA8{W?5QU_^B_sb`B*BD-`8$y!5(7 z#h`y6M&1W*D<68cS*U!A!)6oF(<1^L`19J?+k3KMtz|)0!F@041h^}Jv2`HBb7zR$ zYPA*HdbuG<&~@!Q-$Io)tw+Z!asrL2U&orP)q^M0clR4_zX{n(opu_;bYxSQFG_;Y z^xCKutQ6A?Lefe}(vbV`DSqhXSf;fo+3=K08kc#Tkt5fN-73g7+afg+s+~IO!B12u zve*}BXB5QkST&pUF-K$9TT4w3NOKl|6Ionew%MKkK<-^X8-{Z$Y)5ZaYnVt~qh?IU zA_Oe(Go&W8({n3Iu(dkcM29a7zm>736zRrr^*`Lvyaa&KI_I5Qvz6CQY*uB!V;B75 z{WqXR%oJ92U?LEm23HkZ10;R2I-YcbGk6+Exv1e})7bj&p#*%`xC~T3Gzq${zw(D| zeF}!U|C#;l<9qA6hqQlmG>Fq&DEFl^==zAsrAy>s0Db-on-Mo?0Grq03hSZXgY9$$ zu`?i4|J!1(Y9Gh5?UV>Bi3Y9D=@fZ5`L^})6S~IdWe#Fu2v3(Yq2Q>D$u@EM
m zW@G+DbSf1HW$#YEoTNH(m!>=)oAfRaRK&;pg&i{vFE{Li_bM=GGsIsl&)!b#+*GLn_g|@(B%KBN6E<<-6%yvXxAOG7ZN_$dT*0L2 zdsPWJt}aN1fe$CqwVA7EzGv|AX$_@DNe0H#Hsb-e6Q!9)pqa>gAVO4qq~*ju1X z&`XPb7e6AK$_U0|u09~4%BCuuj3g>LPFQ`W4EKt(WTkL*e?%(dS1| z*In@;p5`~?xhSe6Zk8Q%X60~r zXbv+&Po32w-@s>z8zD|Ik~ygt{WF!dexjcPeKb#)KJNQRx zsxo(-J{hO66*wpbLwlQ2pG0sVlg5-AG`!J6i-@_E896(EG(!mUUN1n054lPulZt(t zb4dzpo_U@O8~pH#=%7U4fpi+oQoW85D_f)E630V=$NzxQ7qkjf0IQ(?&^x?#4L0A@ z-wbZ6yNTdS6!o)MS2DaOlCAZtSB}@bC~vMUYHDf>Y-}KOivvq6hLN2DTU;5NoC1Gv z#Ol=Ys}>s!Ol)v^A8rc=GkjYf+MZC2S-w3gQjQTKa);(}0*=QG^;vGXn2j&8b8&g6 zVoX-xb-j??a-2lIJ<%2c0VNR7_9z@|AZGd!5AJCR#L6(#sG4C0T)xnkfWYZ^Omw<- zArATNQr)vHQjC6CG5z|7Wx?+J&eS5>Laj?a^yygn>ACd^q%#g{AOyrm>kpWIN_Kwl z{Q!9bf>9f4Ng;cYl#x1%XVBnf)PB9-OZb-$wp>_-A>nW`nUKwo75y6?QW({}aT?^Q z$UC|nd*;vr+m{C7B^X}1%Gu-pntEG z1u;ihC)nQ{YQCNEl23PMS9xM~e1*Mdj%9cMI}?O6?{txAjf1x}pOM%Wi_kN>{khAC zk6gRwj{#?+<7V@)@@q83weY?|Vw^~A8i%u$tQwZ326O|tB1zD zbquLo+TwFvZqMKSYO63RWcaFCGl#~~;Ys-dI0{$r=9F)nqAlsee1(2*2an5oB*%T{ zYWJ2&vyhh;-p{k;aP6n30g1wK6D?O-TH4z=OCPN|%kK5bmuyuVOmXd>aBxf7W6w>- zyo&}Cvg5XtYc2-wh>~$>DnDf5Ua3BJ(3?mgeu4ixAp}^rPcfLzs=npdQw8w2xHvgA z+AOQY)VJxL^38%X3tV_qZ%$Yn@f}BPP+h1gf87AGleLwV3EpKF;qKN{FTz))d>a1e z_s~tYxfhQOjtX@iYzJQ>Lqa-V`nAiZH3(L4eDC@6CbQLhST8I?*Q`E7sHR&7nt0W8cn}OpHxTa^Ak%aMAUMSr)wx&dkq# z7D(EvN_p~HnE|a9r=Z5uWW2gK;&_%uj%2xzIx48FsiS6fXe0AEeVehHoGXA73*TaP zxEIED`F65*$Mmr*`|VyH7w_?Om4N{#URm|!q26hW zRRs0X$%SS=R_i1gY7ip?Eyowu?i+1ure;r8y|N@|f3Njh_wMvx(TMDs%>Axs!P)jZ zl-ZXRujnh?lGwzYBD;y+}bBhZNILS9s8(NAEY}aAzvwg$(X4 zuA9f~M70W#p0IJDS#meSm)_bgO8vVRK+QCL$>lY`sr9R#M>VMgpqAKeBNAnCsSuqX zNv19)?-Uxfu?NV)d@d)+lIZeevq|Py%w~E>A{Y!7++s#aaC58CN zwKaRw8&7NDceZ1jQDb;{O-9Wu!vVJQhEM#b7C>J}Naz%6kp?9)`$miKkyxREb;U}$)PKQ{9 zHW592Bwp{X+p~Z4`o?MYlyKeW88O^)bcwp~q-W0X_a(BEr?X$|etd=I5#LL4;1d@Q zB4ws~^K7FPob}4}t--l@m_a$BF8zx$?edb5g6*)M^&0U|0t zE`)C6P84GC-otQB&cZl}maR;4N7|_ta+lK;I=yKiOmqFemhaXzqc z-CXD8U5uqGPcJD!gsgZo2uYWg51;O;N)X-QAgr8yx@nD1LS$-MB zgm%&|so?d>XrFwkEsr*FA}=_jRT`T;F$IKDj?hrT9Ujbi{1tF4r`=^^H4OsYgw`oJg8i8hShoKiBX~~o+vz(*G`$Vj8M8Jr|naG1NibZ0_;%X9$Y!8x$m&k$>l@;PLlynCa4RB;-u;o+w#ARSMT4(%VPG4 z_34MSP>08y`Hl%mZW3FSZa$SU#37C9_y%e<%^vEs z)YJ(m7ECt}wRroWP2kvO?I&1xc#`BahHsVo-Mzgq&U;J>@&n>VOZe9F@A~Q;- z=Ok6s@sWWECOld!mgV~kW$vZoe3v@rI6UqROOgJb!QghiFX$8XqDHJA6fj@P&tB2W zW#PU;xII$&23&vLSaRM8703gp&qXkCaN|g8uQ3`d6 zJsTZU_O9ug>{9ibq&C!`mXx9WT@7y65dHKBeFh?TO{76~O8B2#sFmW-zN<<8Elq+P ze991%B{FokM6dPa-dM|^d7@6_n_bI`5MkyeY0#oB-;rc#Yv?mVIVE808}_%po>1Lw z9%?20cNll{Eq6Q9nli0Po%Nb%(Wc~F5Ljtd)8Mh=!Hz337Bg~1d!73kJNjp{`BOmn zhkzzQb*H_<-JqiBg6^Rut)EXMS!wrNzgn}m34V}d(Gneo>(u5j#)4|D_Ilmg#2H(E z=sojNx-eVwzs?=~@TK-b!OMjf%BlBl4j=xcZn~g4c4XKGYPRw;nLB2tA04nO5fa@i zHb1|f6?kc)M5et)4*7@qWc7fv$<5D6EeOnjDnSl}@xP#t#mM%}&sC|u7z~$lXEqXb zl>?CujKYykVioRL3N)04tYE7yI8p)+I3I0=Htq{8F>cihR;t^FO*~8YecHFg1dfFf zB(LhoUm`w(61U~3$0$gWlIW8|wdX4g_5*C!cMq<+f=hbcA9JqAbC~x|qkT#O8BE(u zk<*mIxW{3dxtO;#Ak4%)4^S-EimB<$g@sS=a~pk5(KAwSr5iXP(QTj|Xo;{XVESkU z%03`NR+7>re>q3Y2EFi_;BOGL|VOtP@P>ifb=4<{1tNvmWv; zg{HYC8;opmo)l(@`Fn2{U8de99AU2zP@#lh^%)Ly{ktfBanvp(yfxYHu*oWG;XHP>e0J-~roAV@Jf}aTWo3mXP|@p>fzRzccCWj6=8DEaeX99D&(7kZI2h}Z7w7M9WY3T(c0=xS+6oohh?O2z!Lyt# zDryu-z=$064~Gz}0q3=vM~WEY-Gb`fY6)P4N&=g3jG)6f9a&D;Gfc;Y3C*V_LO!`r zeG?v!rxN!lkx?WQA~Rbt+=WTOfBDyD%9h4cH=WbLBC<74hg_W#+x>>Nofk#H3A}$~ z6;k`BRfu|dh}Elz*DIbV5gNr6Nx9I*bcZ<%&)!g=xX(Z2Fp&cQIZL6W8M)cT97^4+ z*jDEHJ`7?1DR&ty{F+}FB%_LRxgADgv&h6~^Z$|c)j@H6%htHN2X}(I2e$+Z?(VLG zy95XtLU0Hc+#%TD?(Xg|K(L^}@*VDd_to!y?;oIws#7!j?C#yYdabqpWS4VZC`@Rq zB=ttN zZ3Ei|q9o=A=i5)aNX9vs5vGL~^d#WsGbnhR6qvGVjdyeOq>&R@^=8z!<3VFk8)`Ai 
zn~WCA+v&rW*E<=O7#|UI5X}yVW82Hm0G%ha#g(9qY^Jos3GW>#*K}O46WbU1Fy0U+ z!=()CJk1-6*k7{KW9+JaI)(jnp)xA0O@ZZ*PHN<}8*{(PhDuql7& zuZ`@C^N*N|yU6{NV>jmGbkaJt_ZFrQ|lm=2y9j9ue?KXAixS7t9|2t&# zUqgnKxv+iL0ysYG2Z6XD_z{cAAj(V9jl9gkIaL_>N6~%s-Ys?H`XGp#9D9!k`%G7^ z%*)^l^MfT~%Ga#$^Wykq)s%L)k`^Zge1}{njRHl>&XJLM0X_sSRg^!DrFkR06)UNX&Qoy^GxN+#8?!j`SlwLap~{z{^&+ z^*tj6_;1i2*G%NTp zbHi*0oFtLeAaS%7+eptmQC9MsmgEOe@k-yh`n8B)0xU954hBa`mBHoaXIm6SBl}2= zvanVU$=vX^kFbW@Pxu^u6ZFD7zm+9A2`)=M1Q3%PAR3sRB1CZiU{|cO176MCE^`-@5_;fW|w`TuU|D+zjcAj zj6>F%^@a8A(=&k5#~PXCs>yhA!2Ia5PIMUWz)<5Y5Kb>`4s`LPLQ%$x6_S~aryTcJ zrUHHkW4OGa-JKl{j}cm0Y+ADWZAu+IJvE;=#Jx1bhzvMI2E{Cv!sp}B%VyVIAyIk` z{X9u{-9U2yyTA5QB%g|-@@BT%KAw4mtiQlo`e05k))}XKNosu4LBv_tMM6tQ_olPT zC~m#gvnTGMk>0EFb17q1X^PWHxEY?>|C zMwdNu(QrpN*Z!>jH7Uu^TAlBH&(kehH4HYaFMcE1#v?6WJ~zBRWS4&a7Ysk(Z^W$6 z-of-O!zDGDjUOj_AFkgG-{|WXt-~Q8jGQ2QIs4eAe|zB!_z3sZGYjw>@Qtc%jf{#W zt}OmD`uZ2n`W6AeRS3eO*81IFVSiJ8q<_yyII`aQrew+@XWQ+`?u*NOH!P~c1%3%HE}VvwUZ82UqAM#GcDoQyn}xMdf(EDJaC2nYZA61x*k|oBkIbR_+4NPO z>L6FB$Mv)!XJu4WHB{P1OU7gtc$d;{IZSV(y$584{a2_EocWY~*jT{E{+UnAYG%Cu znpHsnCz^yDffD6=S%tk2hZMnkb2&h&eI#!vDu5TSk*dAMxB284mVG$RSvA-@v|rzZ z&Uwa4akx>h&U>>N>b`K<)^OC7g& z1eHwbJ4pwPKnSoI)@B?ie?Y9aqI`P+Et`-TJojymA^40pn(6yHTNtos5ZUu4#oGo& zr2y}-aT5NEt2+GaKjmSVh;4G0!{6_H2Uy0av-yP-z(xAa88K){o(2I5TPYR zYE3~N+ED0hGg}zRPJM%cF`SFp_ zIiK(U&9dmv1%_j0Sc$FP|88Cp{yh}Mi0Bn>B4u%7RC0YjrW2_1Vu*;%L^v9ZeVUq4B@Ne=yGs-LCI}R zK0XZe;}4-uO^)n)uKOhAC6h>#3O{w@sx;VLPM`L>m~1{`T>LKLvrgTpo{%H_)bsgQ zWUIgCF(Icr7oMZ{ZEv@2NgQf+D+CFGNKu4gik^d9LQ>6z4uY`Q3>)*G1BYN@N~)*V zDPY#u_dK&3hPA+^*+>ruXH-}o{Y#!E1xs*8(rG%~(B+mMlze5Y0M5@!~Ry4jYZ ze8C?PT;YPsprZRJhH%pS1ORBjX8t=~r(T;A1Eom}HJ;C#KAV|v6(XPuaiaiZXfGM# zK#h||Oc{Zghi-OM5G-;OJ1{G42@fk|UP>F$PRmGFjayePQ6JO}+n_rpi-rP=k;V6v zJ~G~yg|V(^Yq4rdw-@Yg=>8W|fyWSFJ7jc7u84`g(xM_>hlvs+-7fj+63VQekd&}O zwrNKCnXvH|b=mH*HJq@BAY=u5k_O|T`H!1BzgyIa_*@Z~ynRJ>ZPXs2G@79bvwBnH zp9#K!$X)KBCyc<+Qy1?Wz#w*m4&@uIqw&8b$RGwu5hW$XsTM<2o+@L+iTGO_+Hdqz zWc#M^Mi>Ki8;&f3?taG6lGh5dY%$%xg!KgJKrxZ(D-b`Hjd zz-oL-(U;;UgcKzyXgxXH!K-@<7WT6DVHvj(vxb~)jMAsIOcTJa3~Xm>{Ruwv)&C{n z96^^8-m~!z_xxoG1+x|H=^k5#gG3B2tCRd^*VpxgkTdeoUhb0W4E&ktC5H6i!=$jl zb_wb+|mh?wCp0O_tN2o94|;FXZCNS6ziRake2vdG2mpyI>5cnFPwG3 zExS8XS|Yur+ed%?&Klvb3amq>3QumPtn2vSSk$kHv-y>R;=z7R=p9ESAq%`jkc0N( z9NZRCT#|&YEfv}%b=At{Gp07Nk=;D8%6qB6iP1{j1heygvDF5L&;=!up4s)dwZuuM zh!qd+&t|3tQ=z2boMiXNDk$*Lk?p#?y01m4 zkM{h*ivfB)r?C7sDFGp0_lkf>(AQyuQ`pNPwQAlVml?<&Vt4vNRf1(&mA8AW3Z2Zt zU)-BGBAq1enp|!$9-p|0X3y6x;fiILks#4;Au^~z!Jj_CmuXgi;2cVMulT+%qVq)q z1)}`@nodUgZtz6Jci7&-%F1jC@w|7VTSAuY@a5MnYtULp0II8oxsJ3z47jJN$|;TO zl*H|rI60=N1PbhaC$}U2p2-SSE2aMNp>81VzsmMtd=)id@5-a}N8;-Of5@aLi!g9a zX(m*y*>9utsXcdFK!XI`)18ydG`5yP)^(cvsUFbo(c<)ehC%lembI?8`C`W~?zEhi zi%iw(JO2sRP|xHnB-ER(pYD;@9vnzA=V8>XoM?G-6(mb)$k0hMwm$!cY_%cWxzetN zhkrDzr5_q}&b(HuKMkFV&+96s3jcvao$0u}I1D)gcaGQ3!kXNd2BCOoCh-prh2G|a zPC?;KFSsqNN@mwg5~0p$DhHNYMfecM-<>VjXhN`gA5Jw+ci*sm$H<=;5`9CMINGZv zKpAZo&OO{NRHpw84y;W=^OAp#r zI(H+I-xi93V3zS3T@0t^A z)#+!?Cuotq-O0ezT0T4BwKki1G@{QAQBUpkW?vZs1TnnM*Sa${7l(7&SE~~$oP{4( z&n@@r?%VHrpM2mxV;(@SuXVuPKZ7m~xz2Qdazv~1(QX~>?P+YGpLtysk>&W<&iFOcW&DmQDeyEUZ@TKwO* zzqJ$Ul=A9sJ9pW+=d5_S!St!J>F(~n56yMu2Hzru&gXaL=8DIXU&616y!#zoRl=L~ zA+s%Bljhvw|ehQ3!8x%$2ykkX&K41kx!mBOU?3~K=>xqRL9pjnsu52ytn-OZ8b7Ej2(I?W~2-qlok_ETTMJ4oq z*Hkp*hB&fK;d@v|0&Z&U zNC9nn?ARO1Z<+~gqD~JLORJoiUKSZ}iuP2<8djhOorwzqDrtdHAk)d>i~9&E=Lc}w zwd@Z#ur>&sPBX4)SRlfk}7jCFJVINER^s(qU@vp|Rp zg-lcCjpbS9odn1bNn)L(nC25j1n|g)KN=`mv+O#46dr5_$EJ({S!l_Q)#2j~c!~H? 
zA5uzGD@Zy|7ROOrAXWOp#)y8SRcR3Kh^LuF(!w%88Tiq0y$#u3tNNSN2t0Tg&bu@H zRxDgDyug0Z$;HhxvBCZCdP{bG{3=cw8TBN-%k|EuE6CbDuSTgJJ{6o~ljFLg3AZRF z1V!ha`vcD(y<)cNo_aHxzJb1-z0c+@AW+XBoI#FL3+D(7xi)36UP~ROgM%X$F==SI zcJ&aEr7EoUTo3->%gv$QQ^U*0w)s29vQ>S|caD!I`}FA;85|zgDj( z*rvRJNrHfvNOmjL`P#$9@7t6My{^^0J=(ShJ?*9C<=v%P8Ue3c7ACDS+K{RxjT)Qn z1Uy(WhLBpQS(po_ji1>0x96B*tVK5VHw=~E2HLN$dAT92{e7o-NS`PBzVef8oBzFk z{6Zg_F!;Y@?|(6BbC-b#ng4aeh+1F{^q%EI|AXgPCj{Z}8Gn#uX6K%(Hc?b*GRak2 z;fi|SLK-cOi#aL1X5ggXbn6B4QP%sD0io4osczBOJ?5{?$j7_$yvGv}#*qnmp?5n- zXe+D2d;N#cEl#Y4gJ_N`Kf_8(Z9~?-3wq#K4~BBb_FLLLEyyEOFl z5&36a7N@+Hb|cY3w%gm=HB^YvF8VfMfNqvJ z|6jd>$ks#rj-`s(WSH2TI1rxdsIi}ZV!?unke$r0--}JVF5hsUSTnPYCUd)t0MN4dfDT<&<-dt0YWLO(q}J<=;qf|p-) zm!KIm8Em(_B7b3T|D8^vB-xfP+dQ~>BbIV00}ZyFl$6fqVKiu=S%KHFvh>W630gNm z-YuZmlGK;?!?(dbp{Py@tO(g;MmIrYbVKajwzT8l@%fjZeLElWCrJ_>e%OmmGf3!i zB2FLX$T4F0q$Y#lGl}*oIb}jv+^FTg(CE%hdDM(hH&=uux;6?)|Az|zSM)rbyjthW zMXx}VC6zeAr=hU)?LfJvFi4P;GUQA!nZX$5TL0<70r?TA6>~ zw+3^7d;Ss)fXFMnWm^pxjy6(CGP=D_GN%#_2pi8LaE(8=EhX4f-4Fo^lHO<5%)i~g zHSJAoZIc;NgTHJ}X2$_UZIkPzOPiUW>HbR6h|X&lg#Ad7L&Qy`rHmyywKy9AL6lk9 z+54Q-PToiCVxz=$JHi=Ic{`7}CF;Vqzcn7nwYAB;j&|UY4Z?=^?PNB`)zsDYy?V|> zeDh^Swcy#-0qRLf$~hI1oDnMOYB(?2J53(7G!2`NjE;4f!)J4$hvJA?M%NYf#yT zwKwaK)>^JjU1QLuUGK>e97xQNLb}u1t;RK&xgyr5x16tnBq8yi1)q{Cz;Y<6kX|N3 zdGt%z=cdq8%;x&A@JP((Z?OT(UdsazZbDnq;YG=~ z;2PPF1`K2|C#wv=;g`4!iQL_9ia+M`y#)AiUW7@2l>B@c`~`tqDM)^CT=YupX?PP| zrgnZNwhnq~lm+uLJ>IKGsKGx$tjVvZE_xG0Qx1^?SaY3nL<3AeV4@h$Ne=Hn3_5qp zYs=2t&1;s8-{yDB-#k3bE4WykE2F*PN8ix2Pk~5vL55t$(s^LclG`pfifK%oF3sZR z3#Ck-zhf$P=LOUHFAc$LG$P=PB+HTr`Veq&aZR_d&W}2(MDn5Q?DxwZP$YnZonGw2 zyP;`34tGSERH82HLwV|W`dBEKbwsJR2ytgPnJF=v)VT5_kMyryc)GVx4r-{H&KoZ* zp6)3n!0tH*Jun*+qvWiHEoB2tV$Q72aBnC%@#ZSv=B{;OPM4tD7oRi^=0AtK)Rt zYn6zWHUAjpe7jmqlms=OSL+K>#)xCr?^)U7AwzcV@BH*aX!|0e>WlNlS1GBIVwQR? 
z1KCzi6V116bF`zy<(U}RKEacaFK{&4cdcA&HFKasDA`R!5sqOPG$cM}RcZm4^h&y) zdj>B@Gj8CPV)rA6CsC^8omb>X$0Y9XL*7rqq+EM`N;hSEre>u3e7?~hw$&xhl^+~0r)@}E zlfoIvPN&9ZqYS#e%tsKtTuF6e{&qJ;8oHJ z?~C&Ve?rgfjli4K2;T0`%~*eTe8D?e8_GrA*~gc@Dto_|?|J-#_{p2-umTr6nAci* z>w|JBA+&ML9&oYaxnlHqF~h>WSqPy|>Nr@(up6PtZcpJ6F5Z}$TbBOz z*fb`IURC|sQkaPgh{*1r^TI>T+;i_~Iuh^~;FjnYG%0HhrP5XGchqCS8AMs4KT1gy6cc_(+Mv2bbF8IZxzVq@2gMIaC zKdXGU)PBIa^w@F(`t@JPa%6-wEWiFt@s9tWo?Q!Q)km262k|vudv;A_r9;js&7_(& z#}JgYnBlHwN9^|#L*3dMVdMR3#;o$|on9YBgUiI(LVMOwCtGXk#I*(RmA?zHneX+% zHX@*t6d;6Xz(xxx=XtlA%f+jJHqhTBFa$2tsG84}s%y(q9U_85BPtADP}vN@Q4#2w zTgmZcs+D@Yir;xuMd7X}`9Hb5k9yznI$wm+dFP_vW|J2?_qaLdY%$J#$*t@6?FBN^ zDWkYKvOoGms)=o0=4AEU(M?5Qhed`b#-2_^u|Bjb>Ek|=&3YqQ2&NQEu`8)ijj@ne zMw3lQO6AJd*pd2-2$fyuXUMy4@;|9ipcsP6;lgct3M`{$7mU=m(d-#(l+^KHs(EQ# z<+yxE^@x(|?LAe6?vsdb=U-&v1{($swjd_KAB|G!RaPdA% zw)0&$XJOd5gb^0TTt8h^++Dm`?8wmWh|EMbSzo>U@HzJ$L{^kz4uHlR|gh|ALvx%Dlz zFB$B^|Db$}rvthur*uQTxO5^wm82UT3Hxuvd?1pB`cc*aV=(Rl6l-gpaI6emFjM0# zqH;Ky-T(csrvlV+%CQfx(+t`G6AT|T$h`D>WeIMI*s=FE<`rZ7L?S6z%GA_0M3XsX zccsC8&%c4@%Nv@LDfA~BtTv>oXzx}E!m*?;nsaFAxy|p~%T9&-=WLpro2mKTbOk{0 zyMNN{lGmF9(K~KPa@C93W=w_m%RkpOX28$ept5WJeA1^jmR-_+d)9j1J3Wjo_Iyv%&m-5Hw&j2&bB}!g z4Ban;3dMX}LI$ATRaU5OBN3k>dy)+XU=jRtHO8(+*KU~!C%VU(38(+oC(Z3N~;^rOe#G~cr zq^}{FAKpae-|tQRWJyarS1Zab4p{!JoPf6eLGR>%~HF zdyK$VRT(Mat(NfL)PRakjm!2_LgC|p>3F(8o6#&xK)|b(pwGx^Xxw=H}|s$GP(P|LOWpEn9K6EOT3AtBHwB<1R%RqV*Y2;*n*f}`)GxOz}F~Y zjv388&n%kDEWYtk>?JsfA%TXKS92kmjMFW)0=Y;YS6o|ML3LG2x__TfRjN{c{+Q_Z zAC)M*`WS*2@ri|7IQ0`MPElxuR5l5kvg+Fbl6Fhbh6l_~D>$W_k_7j?==C}u+>TtL z!;A(y^ZjT`pTyC8tCga-AO&$@&^?kyshXycDT6XmWEbL{CAy)ipW|vH>qX~LMsz#Ejn7avZelNB;R=MxD#f0Zqk@PL%iRl=z;oBFbv zbKF0kEfOstk)#;_lG# zL!bBd+EdabIGV8R*$n5RBy)=Q{KJX_paVQ1WuTy|?uk#I$mo@rQEHv2YtBsd+?kQGEq+8|!^Lxe4#?fq(u(&8ZVIqYdP( zqiP9V3h39}W%{q)W8Xwzv1i70%aaShEI-v32GjH-g5Z%>E*5i^TFgnl;v`K~__&A0 zQiyKXY08>GgQSp__#9UZLg&{S?H84swJ*9%V89GW#DCjgK7wUYbu+aL{08GI;fxG9 zVsk69vqkcen-SpFWQioyp{)|gMM6I!VoHP|#MSuDqZHafj853FLkZ6dv6clNq>8!p`pHzGnN{16791Y^r(ow4k+Z11jxMqLVx{= zTkCj2@ROe5jKujiPhdaF+71#Z<~89Sa@LPZIBeSX#l%Sl;L=hP*5Do|%`O21&q5*R zkqV_3bLr=`QW`|6ucY0L3m&G9y4gHFNL#1Bl(x5QLau+Jt+kR!vF)*hE(y7Ka`q;l z2ya8IBJV`025ZA+an122)*40$cbz?qWS7_5bw;fDN-e! 
zwn$-ar!O#<6c*nPTz@Lx{&kSI;8UJI%RX4(DnN;B{PU-5F;fu-(*QbNS*%pDxdI9# zFzj-v+$IJ@)aK%?^&oV?IV~p@ty*Pj6-<19z(J?&^zyNq2TT}le@86CjA~s4)uMW& zyY~{{S{}!omcoti8D*Aw7PG%Y!=nLIC&XZmex8c^IfrW_Txm-(ytK{PH(CQ3Irl1x z>W5A`A!gd*4p%5~qg_jtc{M-AJv%#3&~P8By8lnjBLh6FtfYUC;Pnp~Ri(H?;5;p8 zUh@b+vA|i6lAoadQXYop`}Ur6S1%C{4HGjdv72$2(PpXy8+stR4x^Gu32I%#?LP2n z*<@*XWj|Z!CdgQIu9jDZEhs(=(3*`F1*w#((M1dJB@RYGOq)>fr+~S)h2ptTXf;f8 zG?K+OX?ws4K?K@k=e@HtU{1cD4;;j^Iy@|b%Lr1NeVBbt0$A}yPHg6b+m8GSs#x>Vk9=H=hgIjxUyxso~&WaZ#nBT8c z^X(M=O{iSL8wN6PU?BhHde*WJhM_8r_uW|##lnix`)^O`5ViqFOS<3^-<7b|CQj!S z0gFQ2T-oIA8jkY!l$wMo3mSyYW!QpW(6P2sKh)0et6{lht43+u<0wPk;4kG6c zF)XE35G7%)>IPJ`kwDnK*jQ+suCRBr2VL$X$IHaf`;ZaMn{9{#8W-GdV-VN6dAf@m ze8{c_sZbrX^`vv}mTQt~ffFqFa88{?v+HZ4v)B@Ky0!cNZ5s3-i( zQ9%0-FMHepiH3nh-*1@WC9teDwK%Rp5;+PoN?{=1g*s z2?z+_&<$(7CY>De1@_2AHgmy^PDp6};)8!bV*oKGgh&Q5jcVDtX1Yyug>A+?)lf zm?7#v8|Z(=D#FCUT{7#PNaaBN;QRR%3Xq$}@X5Ehqs}U(WseV4vWoWNybaWa57akF z6>a{soc11iD)HCMd<04I;C(D0c^G|Bs=c;gnV*i66VW!0r7y`Okubisp5V5CkJb(W zgwfwS7+?aoZ8gLeqmK_W2+!f0`Tu)=xa9#|xf|g9E%X5aT*&mdp77r+`1lB#Y_LTZ zUXZ<>Is#+>r|T*3%t7P#t?Z+=5EjvV5>~z5%`uzf`u$!#72efJX#4f~ z^W*IfZnOab#(F1z#US*Z8( zFEy(2gZFYn8r+`heg=4M2(t0SVJjZTTZ_Equ7KjdZ7~SySQYKJrYeos^he0Tv&UTF zN@pOyv&rdsXP!*?0uIFLN3rQlA99I$D;v0N0lx~aK*n)1=;Z~q+Vd2_Iofxl#ax)p z82r9=f`)Etz0;W@G%RfEME~Q$1%5Ru|E0Ei5%w=F<0luksTMq`Er~s8LikaM`*HJOgH|)QnG){BWahUe;EzlTUffX-wXvWEBtm zSAPU3xt!7swBj=Gfc{7i19&RH-WDW|hhn_66?cp9H2|~I6heV@mvxCA`ro@Xh{lVP z0L?^8AgcwL!S>6#V&W(w=C8=~^z`@FGQ`~~*iFAq^eOnOj0a}`LZSMcw2*kxR~*YY zeO2Qo(RpCzd&4DYn>xHG7f&vCc(@e_e`R6enqbsE)%=0m|G9Hm19MS%fB`k~EPb}P zRO|F35w8`?{M60BnPDqZ(U}Bszf@*g+DIDY+c%3>rSiJ*Ipc2p)`joQ$UYtg zndBm8NR$r2^a71+9Nl|^9E0vi3o;{YN7e%-D)%^IK+T^6!YnV}hcRSdoK&f_v%lBi zW}8ZXELe&|9CQ|FnM~ad0r$mYFapJ``|=A45kU zj@Ud!Z+7)7l!Z2=foakBoy(LxjRSbv6ne#wC zOT|~o1j2TnzH$JRm6aGb`GPT+T>|o7?uO>gjuY_IHP?zn%x6W3vPej}`&tW7Im(2I zcLu*>v>EEpjA1M>>=S*xob%!6QBIs^D26^75 z&{+ZgWmvhi)VC#uZQjriaIBvc=7R&t$?)myr=ua}u35oV)MiU_bBdueB#YG_E*OtX z_4M@QxAgmq%Rse7KYn!O>?D|*n@`oK;=ajdp|q2h7I^ngWc&Cy!hW$z9!o#*_?lQH z+!+Moncu#V16mDQ73Ck^yTI^kxY-=q=7#Zcacm3*nD-&cuDm}ig&GFXP;41bpMn1^UEcw%A#@8%a<-mi6GFDyK~_W|hzb`5r1je2A1PX^Ko`g(f9 ze>4kSb*8g`sQ{z_s;rZQQKFfy#b|wf-LlW!Dcjg%ZKXwgvFU;1r``J3t=evG&QbVo z15N^HP-cU3^x`9WIO8~XQ{}xWpECJ!Onl`+uBRb?p$S`|JI&UMF&s$S7HtY2Yw;@p zb$B$;*!;O9%Xi9c#W~-;%WC=(I@pke3KoOcPzrxjXRIcH7_H(d`L?QniA%*>#92ym z3>L3550h_ZNNxTyWm{Aiy5Uv*UisTj@o<#9pRK9VAtE7T$_w4`k*VW`V_&-mCzG5;`U(lsdXRS%dEb^62baRMF_E{Y zDE(Cu%SHS#B1VM|rDzmcm8U)A!NtwJf6}rzJTye9?h+r~)gm_9DIj=V!zj$~_*t+J z_sc?FMyszcPbv&$YI6ic-9}XNJGOu$ zw{?GV{@!ag=iYmZ$xWh_K#!Y$F`e0@$_B8In43GgsM1rfqxV5m(Vx*vT0ddV(Y_jK%-EGU zvp}o9A-cCl8bkNsY|X^2i0$`+c{RhzjfFAmwXp$w!&b1FI_0Zx@HwZ4!hTy!&e1dozR5tjw9mN{d&`O%G_6L zpId7^`z8IaUtr4ePCg-e1&8003baNCdLRYEe(W`HJ=5&$M?zT*VZ0*fo?cE4%zyVq z+qXxalqfsvQ(WrKmSuT@5wB8%TT(cXo4#CrD0!Uv#7H=&tMQxbFd5_Tvx)P7`(I;+en9M3g zctiPjlPTq|Z9RdKNF3*5!(gG|$egqfFeCVfSE)nsHM#p9X6&)t_LfJzU%;;4>$1%yy`CWA(+{qMH4>|5M_o|Xly-}(#KO| zi6l(aGrZFms3C;9Bg{6%IY9aH2=XKT^}xioquKA|c|h=|>JXRrwWZ6b!#SoH1f0xX z?c~@}9?};2v5WLFYVrdM?@=nc%;Ed^LrZ?JD6Zp25Um8?c~}eoEU_8G`EP@z8WyvF)l+`G_Pi5WZ>~81sPBFqO8R!h)^_ zg~OWPgUcKr-%Yb8NazBVC>s)Vvok=0N7`Qt z>SxwmPiP>P4bA4+fV9oqeh=BvrsO8Mf2{Lv#+ExX5b@ubA#mEeSIG-^xWt@b(>(kS z7a-biju>A0*YEhaAd}5_vJ%xoUOSER;5lh$TDoHqX!qIKU2M`os5_XZ?u8GSKqLBo z0X%*_F`oU?<>;mMN$y=Ue#n8+W4`J00um;#gpLh{oBctwx%p1jcHl)P61rHZ)u~>t z{kc~TbRQj^1=76vGhk=+QI5q)9SYzpk#k9#*3mWw;c z6BIp>L=5$@D4vfKc;Zmj@r zS_He^a(5Cr#_@%Fx0^42iRdXXyUu#ohRJYyE~gbiHE-$}nwW>XE(NZukBx<+Esmi6 zZg;44;n`E({-8Ej$KK+Ej?%tF_LpR-!AIn5(-%qsRc_A$JXK2%_nu-SjnOvoy<%HT 
zY?g}Kexq#W#QGLw2Z1a*a*^ezanFV)e&d6r2tN&)qv5@9|gyEc2RVx~@J_mBmN{U{dg2$5!~%Pt^e z(k}lzGH>5KTaI#ljhZkv-mW$EEZpp|i{fO|5;c{}v1cK+VVOHMp0C$_R&uneA`~D4 zs*hJbdY3kuTO7`PD^1DGoM}?UYbQt|{xQZ&+JF+o5m0aMP~}j>-R;{m!TZ$?3?}*r;mkRactvn$3|g5zP?&%)l_i zZv@di{sgM%LT@j`0TTybU*a#$>VTO9yaQR5i39cV6QlEAg2V|`Y{2p+jy_l6&sIW4 zRTVjlql3xe7MkJujO+eEe19-o5W7$uC7a6nv^ew0!GW=j5+eA{oykO?1=Q>Q>uS@? z4Fwl;m{%|n2$=Ex$sT3+1`nLtDW5bo95Gxq6ubsD-jcRvhH96m@3tc>7;s7PBL9K7 z<%-jvsu{{#a$>A2OWM2DB{*B7tB1`j!|7n`9WegWS4*Yp)eNdCrB#D|RJ=*Ug8eD~ zUxavX0X1kZM83F4tDs279v;%{=zRjzFc-r7;sI&4yGFnNurL zb)C(MFXCM0cHMIYoL}zgg!H_wg6Mgi1GQF60zgvzpy-H+~z6vu*zapxh_OIrF@Y~z(^2BY- z2t?pMOCgA`{L<+sCFgt4GbH(znu_nO3!k4}M^go_BNW(D$^AP} zdXT<;Z4CT|`h+jxien;JVd{5kQ$O>OMQt2l-KFBc%}>nlsS-xQP@BAsKHg^&#*T}T zV6;&_k4dboFm={K=xoN1G+9IZXOU7Np7{EWX_xP5@m~WzREN&>Enq;al3uN<0snQT z{&Iify+N^nn3p|lo7yrO`g)w{k4a4Am@>wk;&v2+T*T1FrzEWOZ=YXv*`b9->7YtL z!M@@5P0py})u5nb@voJ%kkxri&-0QZsMIdL$-i=B@4V!9`^Y;#z?O4pq`TVMtR|5l-B3CNzrd$rcxQ1|&>;J>r={_Cg9Quf!FetjzFbj}T+vos8iWD^v?q)N`z9LZD1 z!_oGxNMqit#ZJX{o&8;I#q4@p_Jp~$={W4)*{xEJaD2x-a7sXMu-K?4)C3epf!YOTWAV z&kOr>OzhhHErlCL)K$gBV9J=hUKZq^>IzZxW3q_85Ja>lC`hXPV0^pC2;|NjzXXxI zV16J1+NV(_0)m#+;ur#1z5)YbG0gb+jcV7H*ez0fBh68mulr^T8E*dId_2xN(E(Om z91b3lEDO9`DEPPH&?qXwFnBgzz*ag{9>L|q29~2BSD#MwV@}8VzPKk^k6L8jf7co4>m40E-5 z){=iyZe(^6NL4ZioJ|pB@zN5j8hjL*icEPf{@*nPzsfXM^N=&*0h8oq6~IelIk-l+ z5~buqOG%K#i7z)PT<^Octlip_krP5|n_sAO*V`-gh;%zsUa2DDYH67`#DE{vP1a^< z6%>w05AqBMU`|tPt?^kb(%rSM7%rdFZ4jgONcQCMq+2}~E zyUD>17t)pL``^U1Q6xG14Cy?R!F#>9^#s9%18Jrh2Wqm_Kbz~x;cw;P2PbeCX3WRm zCwb&)MwMIPU?!!OO?eEVSDm?+zw5(dI>7?I_ipxxsOr`huu(Cc@Z|j*5)m)HzmyVn zDhwqjlg8Ia4%iyEs8TM$%{}roNDusFW$JD;LzErj**2m`RT5xA{kl~JX`%m+%9Kw9T&zm6te7q`dC#vn0@qdvDU_^=jYn^AtsCon zsS@;kZ}V=59fkyjbcoeGFL$O?#9zhWR5txx9EY0dldxRTK}@2b)_PIi6DhD9oog8m z-(VtH0cfEHIqVFs+?~wZrz0HPYCpxyNLqZG(2bpLpw16L5DI61Uzsi(WRKrrU>Gs3 zmM*M#c0L*(J5SEC|KCfF3M_d8$K>Sef&*3h>w>%Mz>MC_ZSf<6_&?Y*vQ38c$n!gx z%A%b)^wY@=&}$ERK$gSh_?s$#3t*L@k z%%Maw)jLK}h9nMIS5>0ZKiFB*$5RBUB!|(ensu=0g!$*4XHp$Cr-us91J~Gm??nyf ze_~#Rk`@kla39Cx4fPwyL-OCQfj5w22l1usUV^t@DL=NO(2lW|-BNyBa8*dT@s#sw zvV|^*zSo#2?f~y4rLA@j^`an$&OfxFH<|m&Y(-CB>TNB!Jnj-jdiv`pv?#6UwiKTZ zY6#i6-c`Iy?-{3>ABpAeR zybyu`MA&M`64eR-nP;N|KFj})y|4bOYW>y~5RmSc5JVINLFpEd6i{H%-MQ#)Nu?V^ zO1eQBCc8<{Z92Zf5N6(j7<9c%@A&3jduSxHUVQgd!xBXCDb zs>~GW%t0xJRJKCKVHj4dExRUqg&!+*oS1|VR#%?K{%2=L6N z8&uNHTS9gB(yE?v@9C4Ta%u{ntBVTVr+i3{p+(7l z(JoOpU%Ap_`%)>x{kfgu`?6i0)js#nRQx+DyU?`=Pf#%Lx<7{F=BxgoE&w45eIrZ){MeBVvu@8{bX-&wQjKE$jIsSQrPFaF8E>GGc=s?Vo@Cgn1Eh|+ z1$R3g-Ojm(y!NPCN$=Z$d9etryM*%-A3MsmLc5-RJjFX_i4UiRIToNEhFLK>^CZr2gDJa{6=fz9zJ(_P?cU8Vr?Oqj+NyeNw zxJ!TC#Utayl&fwvUSK@(qWFYsUDME+DO=iL>dsL`%oC}yFkH|rA4OAb5*;wM{J_h5 z|M1CgCY0`@4M5GN(ZhGu)bI;e{a>#RSpv&5bCU3~Sr<~;bE@ctT=8;rhW2q|gm+i! 
zy)j|ZFM8B`8Wk24E^He|0mOyD!YSi+JSz5SJdaWF8tdzIO5Z3gOZCLwE=-X%ulYEI zKQV~V2)yC3%X`RPx4_+4V*Y|VO1OtSU>OAVrn0TatzK~;)jEGr(%zfv7H!tCaDqHvw9Yk^Z%7D$;bRvEyoc;8L}e&7()k~ERHq$x^|W%DO0#4iz|x4X^3D&hFB+e9B~;!CWjLqF@!NDP>YvC{M#%;vP?0{o?knX+$u{gxcI|;AZFn9DvLO#`Z^tK}vb?^8L=@Ir(lH2`m{dX(e12={gn2{k?R-muL6@W%eF5+GIZtk)-`-8<5ArW?VQM<0w}=fH*9BU?~_t< z6J{lPuxpscr6Xj-?*n-R@iO_!>;@xD6n3|QGp_ohf|~4J3VC);$eI@ZCug+MUpEhj z(7R~>gw9haHfjO#SV|bMmkB(@hyvI~(}DaErH*kF->+Ico{0}V5-(Hw36n&T+eA{S&Fa^8nGOXfP;DU;;#g zgs9009F#g<3S6fk;8h6+W8?{|Se+2nsi!kN%N;?J*iaWEJ<`i74*VEiTO~MZR7J7q zjPG797U*dEHA!pSg*Kh&56%OJP2S^PG|Q5#Qe}H(q}cur9M|6LKdPQ=F;F35XiJJ0o~TX$ZfZqm5z3qh+fb8 z-4#`?BKE9?ugc$?R746u@W)|Q-X*}kX5KRzmcT!7KArKikYVn#s)+}C=4sr5E?*D< zF4tqBbMG}T`kQp5yAL}+5X&it9))XDR#F&fnS8nMV@c)it$p1JHYop{BE9rqf1Hf8kg(8}0 z4)im{myTNL8r9F*3Fjg(G=68+zj@9Zpn{+}UdxG%Wf94a1H zD_dYswszQ=;j@Q)Dp7G+-~jqkh^RrdYJO{$^BnNJy{-V}zrOT$eI>jOuo0|Aw!L*G zy1@e@gLh~2D*n`&nnFO|CdtEb9J9W{TP#pgxh%cV zVoB@03_$e49b|NbUCTog>ZGB82rTWILYSDzS$|cmzu8|TAOTQ37u~7GFiPLzx>%(! z{>KYQj}mTwqjLSSy+=y{2!R_SQ?nG!z}g#1k}A1>mX>fVZUvG9*IC!(oJzOw#2npV zmgnDraR*tQ(3E^P8i0hsh&!U1*P+PnBL9ysXb~ov>nnOSK!fxs#a#IHr@!6wpUD4T z-$d~}*1?sX1q&eaG!dU3UKI_V;D^>L{t4cHU-35r3ju=I37uCkdq14?{QPL!{jXpA z53K%eH)Q~+4^Q_Y0C&;BaCh}WsndRE>-<08+Rs0mmjLd}s{eZ+Xeo< z*TE0KHD;-rh=D@bLhl(+OiswK?*2FX|LYsBfT8Y*h_eU7>~OUG`SA;K|8K@n2b`gI zx_tr&Nl)hOm2E>UC`&|`y!F30TD_k=`s2@Q2`SAxP|L2$dZ~p_ty1Pre|M>!c{clSI zV2bNtYFb%9^86tT{{W<;zkd9mZJ163cv6Fa2Ow5VOtfjIrumj6=+314Md$ysInLlb ziTXe^%M2cyrvD7ke-=Z3$nmfDT}j&_oKPK93V+M$lf6eT{x_)pH>mzMsQwDI|KEbj z^lf@oRXi_Yx{n+(CW@b&dnfoWdR#F2mHp2wz+V~c9ncyF?}z2jmIMP>Dh|xC{FTdn z#Su{hYaqHMy~+NF7x<>+tIub~CeF&Z{tbV)9&nln?1Ss{3Xb};|AhW+64U#uocHYL zhm49@tO?QAMTHA{p(-iUtldU=g%JmO&;AW=G~94KVXbC!qW~ym&DgV^{qy@w5xxCu zz3mYl=-iR{p$fm0PO^98d@P#8tq#mbdqeA{Le>3 zYSSc|t%HJ``vH?S;zOAM}b1A4*kNUsa$P{j) zox#o3Ho!*4k2iVS{?>%jTbhKc!39PUU73xT6eS{A25gu%Z-W7Pmmyr9OWk*^?K@;R z4OFd+S|!Xd3H7i4yHXLBf%6sX|HxO)l+sxStu#}%4r=fk7YqTF zM$jsm0U#LITIu7Y0bjJ!T20@-8=_HU*~4&9%XVMS;g9}_9*7bxJ5B~ahO>bU^9W*N z3F(UMc~P*)LkZ+~Yh_6m9Ju#?^pOE)XV4!W9Svp6kno%mE+21AEnxvtVqQSNDg@TPjUJp>yU?|UY<`5YNnRj?9SOz&ho~o<^ zO&eHEx%1DEvpGZD^t1qjSyJ5=*{t$?M{XTZhKG6SIGC}N3n8*tdG<;wAieLYP~hY* zzNV{1`$7B^C7FeL_?kBlirB$yF5rQ7nu-RLhi{pO5B7y=~vrNL%PJ-viM_;|`31cFJ4$20){Ql6_ zdvX9}YUL?$t7Al|b0f#vz+n2P)R>3kur)E@C=*aJy(2QSIs$Yc&Kg^LC#rqW9YX4R z$?M`x6JI+L)tzkpG}z=HgSjAr6x7_0uqHCxBU060S-D$|je%U&Z!X^`8gvoJ%UVPz z9^U^0XLpkT%g|7~q148Phb(Yn)=#lr50PV6y`i}Xb2>ze8)&QYJkcvl#8kgb4p~Oq ziX(bl;ZA zcv91s`*9rR#bZKMtQE&QxZ!V?=DG@M2L`xIBH>M}bvW89>R+f&{%oChP>P zumFs-J$TdxH?e=Ek6US-%a92F>Aqjh+D#Cv&0xCIflaOEnbGcdm(!$Nn_JNcJ7%cU z+ZCzbVADzucyCRoVuT8?*SbmOZ-A)`X(2{anONZ|%kK6)R>kN%2^AJg#H0FlYiLHh zV^Q1gcWJz4AlgKb5E0(B4#q1}-$d*&s^508)y6xGudlq`;!6G+%oHx&k_YBgrRTQ< zx@D>7`M3dbyW-8(G4|TFN}m(DMjP3o7?ykc zVn>tX5rTx1Rg-m)UEj#CTsg_k)3@V7J#1sBoL`cfD7Q%+1sHL`HOxOiVY%K< zMWleg5>TGn817@^TS??Wr83||oS;haIcTKSi*YhEW6t|D^{`oo2G$qofx2iZek{kk zujlJ@o^vlfhMi=+03CY9oLeEn<r%V(MhAR~12(b2* z=YuCmZWgk?Thu{M->}4GVN|IYj8UOh5%EQ7EJWLk`DKtPYw|y*I^ZNw;w(f|KN)+H zzgr-{kr%03Eul|}PuD-Gl3s}Hl8ix_XD97w?k7C)5KU7aZ6?}q?$N8vDYmU6i+LN4 z67sQ2w#H?*QHZ;}z>aGux)Im%`(&lpTr;s6GCO$&0%3{rW z9bsKPw`R!^fYIvf8%&>E@ss_@KECkmQ`Yqi*A3pf&4jDUz}Uh4FD;k7J!}?0k6bd* zBF22CBw8H}sw%;%eZr+e}w zpE(HPN)5T{UX@L@*^-@D8tyG^SzeO2JSv}w`prXq=i$wgD}(JlCd2ftJv`H4!C&CA z1o8PO_~XHgy#k1a_q3iw^<9kTOG-q=QibWE*g|C=hPT9B?jU^@4lsf2H zCt=7qDH(|RB9$xCr6xzyf*K!DSCup?0=4N}VwnV?X$SygRgZ0{ey1?{47i$-Dq#Imp$%IdKz=t~2 zgdd5APGRs*pY^hkgS@3H%mCw*}@$tS2^UWre4*58xtP{WE` z!^cjX36c%Wn=juYpiK9i~Il*VRnrO$eL0#YMp;Uyeb6 
zvLuWTD9Q_lUsAgf@?2k>XHllc{;_2XTq<-iAT||%s~LEJ(hJul)rlq*mc@LP*o+;& zw9$klj7uhNMH{MsHtaHmS<&(ev>ar$ zb6B%1@>gr38hL@pDYGB<`NbgCdyS7IWd6Xn`q{j3mHK&8gGD~-(@xV>#Wg#t6t<^4 zYYU?zxo1iRUd6N4O{oE9-y4GyCsos`x?=taYPh8E+cNlsSepR9Be!3-HGlv#Kd)U} zd79L8=tl~$|wsj@+t}OE%E9U*+ToBYbDc#$G@hO7a&%QEBEl$LP$sG z@H6XQ(a;SbRxqk9i~Hgdyz$+|QOLMF*rQhuxL|9fwOJ@reJFlcKCf4ps{?GV4ITt^ zMy!t+%K<-SmX?=y3knNt)^@6C=;?{LX6-1Pc4vmR@x(Ln$jFkc4wXR7#VHJIY-kYz zhq#(@xtw2L{N23*v=x}ozqA$Syp}E|#{J!Bh0z+{5tI&HG2^?7^fmhN(;4k8oeq!Q ztnBOl*fsa~(tw#g2FC2IT=YP5tnbpOG)qGAG)Sjdr|Z56DgZ+C4W+9#7hpx-LHxlS zjoCV)2_1KiFZOo01O%DInbR}ItfY~Vkv?DnH{HW!0_;RawZ=W>$B$dKOPe|dD5o_9 zrI<#>Itg-;)cY;_$=qv}kcD z!OQHul)Q+RuS*dN6ksr@Q$uwnp}R~WhMKJ)^O%JqA*nE`@Qvavmf9mZPb zl=cUPG&ht+HT1pxXLg{_X^`1cEHa33I)~X}vSc$2500MWL^z z3t73L9|$hQT8z=I^1I2_Q95?I85&f3iWQ-iHe|gq+qk1``A5la|8O+Hyr2w-`-nM_)Ks>~0Z|M17Md zB>$AGfQ^xUE1au8Wm^E=*b#F5Yge#aSaimCGl0s+cNJMFPvgyk$fth)fgy3%{g>`} zLg`#6bqE3;+aCAeHRwX?S~eUE?mh6m5Qh1>Q@5QB-M;c?Mtd)X-*(DoF4P6qTzcuM z#{9gFYzU-{>`GOnrRej$Iq8FK*L}JN)+(~SOQ*WP;p=CPtqS*@kGoiti||(w1u8=E zSINt)?3tbRj%oO3d}e=$cXT5460a?_lbHbMm2nxCApPrEOl5tD>KcUp<%ZBTL>8Pk z5A?tg8Lw`m#4USkPTc#5j2)1VzK_pwyYB^Qg0b4lJDwwA`(>?RRgub*UDSI8 zArH*gsr~!uR_506iC+7H#z(U27qhu z*3-ee{=yJTLjQY}Z9l&KAXt>1B{2@GlZxpo-XZH5qK=OqV zQ(Ud19@r7Y{As|>LjdM*-+lXGD2345dewxH_twoxVdqE0OQYY?s#TLTs8bk(-b@dKo1GN{+b{UnfqsbUFaXm4NwVNGj z<}t25@Cc%oEv~h2i-{-!^v-j+Rl=r=xgtGPRM6(*icp*8*DOTnp4T}+$yFBz_d-?- z2TxGDFKo^emAdH`t)%g=*Em-VOuEYZ36fVd^Zs1snNd2$XBgG9kgZ}~ULFyr(s0@GCXhj&N)@Lnj#>f5++KQejMW{g$@ay+@e zUa%2g8hnGpnRwA1xR4OQmnr;WvkC|SBzZip;SgWg)$4852J|N4Kj=q~jKteTt&60p2RwTAJ&cjvLb%7C`=Grob#MVsB_b3gB3J`AV^ z)s>X|HNZ7^2SWyfbVt_R>&9X#u0+cm*!E+tP%2bTuq$ddsN$fFsg#zHQQAT-#blx+ zrRJb+i>>DNm1M9W+QI_L@c`#%v&6!bE~~B&jw8BZQ=ts0ZYauIz7=1A{j6sa{-SJf z`g>-O2V;UeBOZK;3jwWl_sZFpY*s@(5d2=R#zu^P_%k!>UFDa0_rl4aseMP}C{b^y zOTX6)E^a&Uq_h3H&Mg*%Iv|SghV776p&cRLJ|GsBv4S(mQtXQ7UF?cFK4mU2_h_2} zc}fdl7xgiR{y!f!T@fL7#8+PM;@)243j%|P281puuDTzrDxk4d5|*})Vc!>yUqrP3 zSWBH)|6p!zzCZ3g+rp*8+>xhOz0^JAnT%yGfRAqdgAHlg**iGo#tB=FcSR|sr%dC| z+jPhRvU&I@c7GKdDQ12)4n>Oi3ZnO$N-@gvTi(k^FzC7%-)-x^khUL-RX^P{_Ycs} z<4hK@^mnQCWh~1`M;TcqY}+`U_g&)C6IM^;%yy-*q$FGSvf@FYkX0kyRrhHvp)V6& zt+3Ej1l6j%YWh&zNu}UE{l!TByEs1ssIDAB&zveF3vKA&zu){(La%bm1k;f@ur{s= zNpEIS0%)SH2>j|CWzeS2maeX>F5b?1Ve}?geZ>=%7n3z|uF+z1ynDDNr$T>-?k)QE z8!aZq527_K7rf}`XmY&#qwSe;r0+tPPY+M;7=-hpaaugv{*e)`vKE#0it_8@$Lb$u z-tDpJ4NHD<9B>T|3Be#T*Jk)rQ~c-F+YV!yr85@QRSvGFV%x!5Yq?}Fdo_McrNs!U zK$2)`{LT4VS{WFCQV>Wg(L!8ApH@7KJPg?SwfnaE{_*|U-8XOF`bG$OP+wo*w6ys8 zJru)+of6KZA%67fRSc;nj@KRNME^9%g`N=h0V|Oa8jkJ&O1zYtJfAdpEkW{KdAUYK zuV|~w*iVaS)ys^BtTad`oKaM)&^d{Wg&A%Nw1g$_z z7;T` zr?G#v$Z?B+tNA$^p4e*v(-<#^)#ZO>rXs!o5V&t{S6Dih?8%U^+M1#ek^h|MC05g! 
zWloU+VPGGN*4rm~TRHv7HZ#ivtmkJ0xMXBMb~8cB>K{~eJjv)JsNOHoiAzswlH^09 zMA!WR<-ldL%R{V3y>3KK=B2)&u2Q12SUQk%z-%Y}8p`#cWRT!f#RYS%GCm0mr$YLM z>#zJMNBx=QQ=`=GVP*;`?)w{smvt(cY3}qWwF&x>7G(J57!dOr*VRn=?*5?ykDWjv zs>$y7YqHJ2K`xU>ZAL(Pqg2%<{^>Hn5`yAf4b|ZOL;JNlSfHifGz#RsQd zV}QC6=gvfbLelNKc z30lVr0rPNSnaL6g&S4>y&V@Rz5j1=_N`~8Ga5w(^LhtHosYtpfHdw>3%2~kUJfKF}k&sOzb z|D~Yv!z(i`W8ZNDxXR21|A@rhlB-3^i%eLS+iBXFY@*x@S_tiX?H>@}>Ul#^+N?Z2 z09hJv-xb|#GAkafR7df5m3rsAjvxTNEuC$I47x0J zVyV`-upJ)S;Zab8*mi|VtQ}(}4fJMR|A_t?QhAab9nGsZx}I6N1@i6;LzGVM1TiTx zBfaNDa!|T6w>f{tJJ|SWk^bomEFM$g!W=oLZx6e7%JXehWUDpZBc)a(y49Ot)7BCW zR?r|{fTBpkBh##M@~@inx2j2a&jD^H!FLyXz*s{{7<}jrOY)whL_p2gF??>RII#O# zEwF207uT2gx3&Ba1iY@qLP>cP5LJ>@>f~>?q@LNMF}cdP&;DJdx|Wy{u9yil{h*qk8|k*ZqdfHv#01G1W%Jmb#Wi2W|Ghxl}c zOnZqI-$5w@RlEQD7*@+O8~%fJx41g6XkdxvXwL+5UZIy zA%o32GK9x)xG~7`V%x7?`9ug_-N}_dk5Sq-*89GzWz%~#ni;{1K3BluC6xC5MeYqU z)cttyg6Dcf-sT3G!~)?oLy6vfsofJvLU=5uODf$W#sm*hZ`eo&wQegE*gb7WYMCN> z`dTl#y_QMeymvqLTHUaAqDO4-Fbe}%;*$uN?+yj4(;v3;r;I~J1m! zMM>?cn`QJyBq-gz^%bM8fSq@XI%8ZNY{s#kcrev|_uS>C*2K_LJIE>Yhh>{q^;{LO zS=G*%N}1O+?~K!>mgLrqS|NH#Tnq10{`>L*=QaNAOg7uLF?bXNAzu!k*RGbbx*no& zpHz6VlEX+`v?VrUzReSf{L@r??&b`&R zFnORhD@%2}Z0B9Y0FO%HJji#~abhmG>Bo=D2?$7UjOkgE52#RO{tDw+YqE7i;F95o z6{#-D2=e^LVA6F#*f#reFMBT+)Q%#Wo*i5xaMj|e$^fcuq$kD3sqW$Jpo)!;)$Nh=!9Cj`~M;{Cg0n z0X}`kL5B0YEN+@jfzNHZ-sB)!9Nn+6P)4AADz54L8b3O-Pw=HeTp2k3OCX+!-=A54 zu&^*Fb&lJ};Hi~H)M$Q;KLqb`A`@gWezSjFU2~LrS!Uag;&HWb$!)NiX5gA`xe)UNom6|qJqLmnf)4eeoG^PfcuHKnVD3k zFzjUFCJbURu{cn_J2XChk{1wU6s_u5a)GWzgbrkHvs~q!+?*WqYtE!8p~tXJ`Dd3U zI*|J^Ue{#PoYul-ws>-hhbp0@d@nItr|Xz?D(}3Q^;WFM8Gc@;zrR03YUBTLT;1EE ztja;Q*I8w0!V1J~V#@CJ(7!cn>BNDLb`S)rdlsk_MixJ!C$R++sfp#tRmGIVirKS0 zC)Rws@vb6;*bi*k>-V}ry{^9y@%NA&%{{osE~Ia6NB?xzKOS2#0b2a( zO5ZJw-G$t4WHoi~8acxKbi1{F?}{Y8jP*F?N&MJFeQm8f+m;MRm_71t&80TbvR&H9 zrIOey5=?a2eu$Z+r_*){EbIGa6QkQ%OUVD(mSS_%(YiGy%m`aFS zzwzAN6<>#tf$LqSzDqq$MkC(MzTL)10o{9UCOE>mvFxOc_ZnnAV3fN;&5P8^36sZ5 zq9q%N>AK`P*ka^;L5R1)lY&}r1IZ5SER~+h4TMK;4DfkzKqG{1)!o;1+Y;%bVo95C zU%t=rqQqM-;B+Q`Zd-i9vrMjlqblR9T8aDv&p0cH0*|f&O{`bOlQ~`twrdslI0jNQ zZ4u(D&IjW)Ea$Cmlys#M0p)u$$W=Y{wsVt=2HU$)GaMCgIk|P5P_+6KyxR?5b zdh6=t8jifJ4Kj<$DJV!#>!BsG6&?%p37;(zN>D3vffEj4CKIR3F4sRK@K{QgB80G# zpl-Bk<6N9c*aGV;ecxZDHYk1+SVCIfoqCT^((n=^c+4{cX0_V$+@r1mv&%$cevfYH z>d@1<9U76*@EMWE(@pR2jBOoSLDAQLcZ-r}J)TV8*7w9}nM?tjv`PCp(bWkV9x16= zU~y|(`Z6EFEM-mUOihO^FC6|uka+wZ8j zfUmBdD@Th@g|Dua{6(&XovAJ}?#-2ycBqLi**bieoVvv<) zs_pGjG@!~xH<-ARQl+w19^!a^GN<|0Ou-Y6rKP2)vE0%tS3-0Q3?uc`lkSZP>LL8j zf@=zJ{qhWu2}RSIeoWb#lN$lDl95b8y1g6kaOM7xzm-OHx0l9SZRL`Hx`Jz^qyn|e z7d+asSj7eQbeXh}xxJx^gkyS}97J%h>ir(Uw5B6ouTR%xEDf9Dn54YJg&g)cDl5`c%=8*sb7TjKB~d-=67f`P6CJw4^Np%(SPFe($<&8O%`Wt z8-R@=2>NUb!n+5Y=rt?-uWNhXOS--p9@bS$;-ngY{sXY4;4MtPmxt`kK>i*3(GOoi zhJ=p@bv>IF@o3IRR|%79i*#UmD%*zNi08sj#hoH&g}xL&o(&o+Cq~QmfPjl7#u1$) zsg>SLvy&=cE55Wze2oLONzrcPaWeECCG3@-b*H1H%{+j4&xTK)bvRkgGLN7%U-Y$4 zHh6K|*iQR(uTP1>P61KNuqvvqXy_fY-8jUr$z`(#|Xe!+|-$!hg=qm}We z6#cI#JgYGt^~9T;(D41Iouc8>xLO*sEHHE10K69@;s%N($Z;Gf{@?met88!rPbv*w z&oPQk8)EW4hFix$#3de+&+91BV4X>x$gv2!d!IFUl^Q z=}WQ8#o1%6+qtZngLTsq@o;jdeYWJE#G4IHH?dY+5&4_v-aUDqNM^ZKXSqY@Z?TH;qm@@1BHfry3=s0r$6x!JBS5;FE zP5y%NVCw!jOMk)9Ko8W{71{lxfKHam*5}jj6$)SbinX`}G#zc2t|0#(T0S{_m_?Iq!U54Bg z&*6};?WC)aW|}3IG`-V)-M&jIkIRmT;dMcL>wd!Xx^e!w+LjYyTb5g0V|8|Of4g_8 z_kD>h2~dK~VB@jqlUD&lz^p;)47o=~8()$ll8$YMr#Sgr~T_p^clTI%n-7nYOsS z#wB_l=5n3$ZrZ;_6?1!zgh7s(Xb(bcmHw=~BBfuEChNw}%&? 
ztz6-ejyx)PFL^6(Y<#rQR*`2lfp^|g^d{bnV!9XvIS2Lk5p-!w=cCQzJBn4q2My?TgFKe>~#Dx3`so>5RE;BF1rl4ESk z6Ld=2y%?O{U%jch_Le#WyFQ$4GOj-Un(tcGb=sYamUy7S-bQ~965xt`R=pgk_`&9N z^JaV9Km>`{g3b#9+pRQoo3=1+MH_2L(JtQ-S4Tz=}uY0{RwDeW=3Kn=*sMw(5-8N?165U|?(5<}2fU|M1&N%X6~ik+tt4(luCn;h#oV6?s;! zZ#B`rG!H=E_qr{oYxv)o(3l^6wUzi)9tz{adrOP0W8bm?xc??R>g@NkuDM&MXb#g= zNs<`fXO}X1%jfxp8WG&kQHAl^ecfq!clLc@wi_phz>!bEYE@ePia;R~(|+kCIItkK zX*E0uFJZ6=)Uq?w6}Xlc#d;7>8*qplfR_;QK{OO%i+H>v->kb3P4qZn<0~kAGsJ2l zmA~d|p_k%LUcrn_K8woMrd;%s#LJVNY)~Cg5s$5HW{2C#Kwe$}M9*P`+HOM0rM{aR z(*pB{s?l~tSm&%w#<_GfU;V444u95s_WVn}&31miRjFV;jvs;F(YF&77kx6ZSs6G1*?vNNK4;`ZB>e+xi2xs&YJUUk_hb?$#qmA5XxccgcPXgP z1{vNf1l$k6h2+c z-g~KjEcj=jp32@{X^e8so9*3gJ;^xSN;*!^N}c8u)S$GjWBJ&r5L4NA7dW?9uTJT9 zHkw1Jgsos`C2`ra5WUoZIY=9?2oW0LWYyOEklRZc5jwO3G+K^sA7|9d7G!rl%!asf zk3yO&JO#9WG?15K3W$qXc-~r$tYR#M{}Ro8Y2Y1!Wuzj%USNL1@1^M^@-Mw=LrxE( z)bK4f>f-Dkuh^jI^7Cp-)esV@W>mkRh_={@XYnUi)6V1-fuTBHS3({f*8-os&xNuZ zGj*eM6~0feil7dE##$$~j=z&u%6_-?A?0Jpb_d)Z2gqCSCm28EC?EwoXzN*2@Pn% zGmh=PsC8c%>+Ad0Kb(?e`ITcMYHGCq>vN%f(3T8jBSC{#FrSn93B*-0ej>xae(!ro zr@hYC-qD`1nx#+?W-*Y48U#<{@M*V{bBn&D8DpIhLx!t-PjTeULUYB|cf8KC7w>mJ(FGa<0z{%~eIxfuiBweQpr;31+ zd`=Wbrp3A1@=qHo~AMZtyW1*)y0<((1z}< zpnDdq^N~Oaj)Ty21@bMIN3?&nRhoJ&^Rnc_u&m4n8kH-h_X)zoW8$RIRhv zle{Oow|%3qV|EHseBAf_>yq;|0o2WSVW#)y{d!`R_xe(W9K=o^GH%T9mIpct5s} zdt^(GMjtYP*YN}LJl|Mvm$Xs17rueM23cu@F4EI@y)1ATQ?Fx`@HW(_RXQIx9pR?7 zlqp~qMz^#5=#KKJw)rJM!1(T3-;(;NG`;W2dP!7sUa4=DGB#dir0}xdCWeR){ppwR zLR3-rQQc2Up;K*}7u9+Z$QALgK1V1uluT2GHz*M3TY^7vXWa_LM_!We*P-Mg(}p?&Tnsx*h#kYo0ODnW!(}Kq_uclQ%Q^viuUCx`Ka+epSQy@jnXODZv#@Uo_KU2|fkwnC+c#f6T1x7f7 z#r?0^z1`M!4+TZl5_IKmsE*^dG(qZFYR*;Si_4WI>fWP*GBW7UX#RKx=cyYp)t>fV2BFKoy~DXmMFQ!qJ*8aD84K4)qMI z&z9BZyD#WP&yp{oHZOgc@HNioTc5Y zoe2_ox;00)-w`T?#tMBLYXd57)BqWD7_6->xaQwgURaY!*m)@~qV?t+IPm^DH}rAW zD*|b@og)`!+?($p_d!UEOee7mSoyw}CfR#wG<&OI@!Fz;Ld5TUH%h^yE?mYk$+sy4 zA&ck)v@5(HjF{&gcy1jTNc7S*Biy(%1P2@UzMqrM_Lf|0U*`;vJ@T!r(;U0)%4LvF ze0rU5G%D?*zQZc%jy+>zj`xM0A2e6R=;8VH0>V5OS`)0UHcgCgGokNXj_fSkKS#ZO zbF#tCY^dy8-LuT}Rm4*sYX{h?>2q9KL7>;V!MTrHGLb?oGeViBi3@X}Lt*@hONgQ6 zCeSM5A5%-lzPv4g`bnbAnmjAqvUume9I%3Q!Kk+zHmpK)JfLOJZZJY%NN$1goPcsY zYO$l;ghbLCt7gsbp=InFZbnVsXMRx{`uO$?R03Q&Icb76DGPp3nb{jx8DOV*`#BGo z;`St@%S5rxeOhH#{%4x$zt`Pt@(4}h1#j-+-rB9}?WH===Cwi%Ny4*hDGa63^&uUM zBZW3P-I(m>K#!gPBEG5Pb z=~;>vwj=V2oU7;-+9s|IZ~JJDDK8m+;u+zieya>Q?~I2`x%adF$Q3IH8L^7`!m^9& zkf2*3IG9rFj-gG(i>-#m=rHDvUN}^gqJcb90o!fK9G2(kcc?wq(Q>ZU+#9TstAXs{ z8cuz#y!MKxah&QTQl}{%zi{mF+VtGjPGYI^>M`PQJP8Q#63#!CD4wQxWNqHFB2clN z@2WND9vK>fcWx&T5&VN3t*CwZ!u+8o=w`_<&B6@}SJZDjsG0c&0ZmW2fqmJOnKU)7 zo-#tl(a}>Uu9-ZUZjo<+xb{?utcwRfQm9dn$4SzKk6lm-W6HXZya?jDRxsBw%(8|# z5vs(RLa%p*bRE=-$xggu5~SsBchDMI`sR&lpLOva+&3?}@`5AZcL z?@6-~vQuty8#ep}eWyMD0&Q^=H87@=GJp={L{!tVvMQ(KBC=2b5$Y`K{gomoUxBJG+MJB|56JfmU)ab)MK8i=$y2zgq*?d3=lhl=%e^TVskM~DfO8t(-0iQ{E( z&l`&R3wEQ-NLHj*@-cLtzVFCPdt{EVI`@c}j(&jdk}OMqyaa^g%`Jq&^+_REk^YV{ z=<`JLn3PD@y#@y}78K{|r5hZ4`oXZa+mh*k-6t$hr^&nsClO z27wT6X+5t!-V(Bq>%0qLh&LPSFZCeEEvEK3@W&6`wwe&xjkDWj(vgkRQCXVF?0p!D zdPo^Ytrf zWK8x6r0L@18&)r?0Zh4AL=n_jKm*Y*B$SihdE{$aSyhUD1=cRV6B2Ut5cmG)V$oz3 z#eg-=60uN!iBe3mTu_8pl{>|*`gO9>x6Z|3@DBw&#tJ*`9fk9xz=`UT&idzHk1Agp zuqu!HfA7P2;dL?c{oLCPZ4&UJYJjv2M5-*WWC=1HcBIM2H|4!ge6W|w;*-lJ2sQcq)t zSdvmN$k60K&A^UEL?v=bLKw*SA5$kdWD3cZ9~>krd@X!8XYz1adE%s*l43vFe>^wW zxohZs%3J0KRd!PfYJm(W;ca;Mj|y!(Hs6&-J@nn>qYEFqDCpdt<-mZjY8QJKwdsN= zuUBSwY7{Tr)Yrh1idyu$7OaY=pa*+2YNFL9hEKhidfJI()+B>xu}peS3f=V*l+LD% zKFP~&l^`!+2NAqzy1*W3zEFn2IQb?Ue9)aiabN|J`(NCgx*~6WtPe5A{Msf7_YQm{ 
zKsOSxAp|}P@AGpY{=s0%A^G{fS1D3Ho8CR^t{Tn%xo!ytK&Wtw%WLS5b#au^!V9pbk5HKkYIj?^;@BUQidi~$Ot{WcYO~0zc2qSQvJc@2_kBzJI>s!)V0n+l#@Tf(wN1Z__irifi5*Ok(LGPd>{*#ZNct?#XwsWC%q7bBo&;P}$VxRj z7Tz1dH1y7}{u6A#Ip(1xhry~iL#VR2-&j?Eo2{z8IwH~+U$&wEnBv+8#4?_D!Xh$3|WR@n6Xcc;kl;! z?*4wh-{<>!J-R`jLF`@q*F}%^)mwZb4>f4(kQPKt zK!utX`k_SyF8+Bsh>}ZNG{-8a7u&Wx&h*+tU*_e#=bLR)&tJ}h3eM8Z2R#Y?EtjD8 zfJM{7f?(?d@TgOgfe9`S{UT;J0?SsFB=GxsK`p&klC8CnRh7uuHs=TA=+=38c;akf z+{Xxx^af}``@WDC_Mn@XjRJ3s0lJ5ED_pm zf2@943qj8{<-mw8f-c2l^5RPyTn)Xss^+_wgp&p~K4a}3Xu7d&%kniS=hvI{*YM~h zED(%kM-;ow_%I?m_liHk%rc8Yf5>;NbsXK7e&)EvtZu@Te+CI@vK;5 zugKdUi*gep6Lld7*|{9$ypcWL)6B4)I>t*8s=4w|8r$5=%!hCL;VtAhACJyO3aj50 z#zi=HoqG)WO-wjB0yOu+!PeeWqK&|+2YByawLb6}MugehvT`*OdN?!FChkzG_eP>fCyYVpty`2mzdA0&TDS>d;*)&_F%&WYzM zrCtNYCS|5C(!49#OBn;LW(yyjgXDt6Zw%yeH@RBOf0WKYYG!iD?uya+DQk>JuO_|B zkoUsFO!kb#DVN@3;CIrNsU~|%qjdbG4;mPHwv72B5-_e}SIm1a2g4_UZQ7ili=YBc zGyF+jH^^gVYdcZ3AH3`IxB|_ht$-KVUlH=N+Nz`R(rUzbbo>GV^D za5xWAA+l?R$1F@216s52tHaKzW6^ zz-3CO#=6l%uj#m1a=P11sMx0e75c(?B~o*;ewxc zxY8frwqmb4-8q304JrtBsmqI$CPc2R4H#z>4os&pC=ZHk@U~B3WAr8hp=ISaKl^BR zMV%pK30*1Lyax1Un?6#q+;ty@a^MEqfsy^#`K47QeXDB3E^ z+?&??xf$MG`0>KjEYG%jN@A+@;?AjyN4#^mmHHR8 znA$Fsxq`(qU|+ze%@PC5$k|%(q!#Dp{?!(4Fke8ys~@!%IyZ_Wcwtxi8+!hdXf zLd4iCAnAGO%&q;!>qZHU3iO4nNBE&LqrtIrqYC05>P>P{(EMxk@4zHQ&BxBptazj9dGzDgSSQ&+#Yi2(eRST@9Z|3Cl0dE8dqQ*7NNGL@zB z>hJP+ZP7o5)@dnUDAjnWo@WB-W`FX>o%S3szJ?}(x*9FGl8Ol&i8m4@fzAUxGU z#&obs;kJrSTr07U-_q=@M{a2Kc&L z?NPG;!wf;W6Iru5mYRxxXdCqfc%8e`l_<(6y2 zYUqK85Go0`gXa1GxiuHrU%a2g#1CdWN7Gz=@kUO>f$}Fu6|m zE5_G0Z&(l+emi6~qJ_B}Dpj~XKYn}50OC^I2#VlZ!p!OmWk9!hV#Mue{SBh z%s&6?-BL@6DrDH2+Wlnk6sVg?FZUf)T&fO1bGh36I68m}d`3Ll3oZDSCrD1K5bs8W zl|-q1uSIcXLoz7TB_dRiJh$8u!$utIVqBy4Mi0r}UpNF#4Ip#KEnMhE%wX^N+WMB= z?$seyC8XfRGl<3mP?fncmqu!CF>!pG5$Nr}=TENwS<*ZRk)Fq2hUU)$FKNMY+~Q>V z`b~3#mU50A53Wd#tqrzvd4+d3=i<+-@6}< zJW^0)aJJiB)|uKXdBNEzbVQ@<7OvV5mHwtBaqCU$j>A<~O2^xaknPp493-G}G4{w= zZQWGRS;Qnap`|=WZ)%>lqZxUb$_|{E7Htj1U2WP=44+L*)U8kHj#!qImX1$BHyA{! zP#>%O*o&JT8>M(JXTKB_)Hqt9NpI%grtHNbM-dqK?A(|p(!?@<#GjH7i)I)ze=SpT z0--_ise>!MBFssmyz3XoG zOea3fAqEK#z$)H&hBsAlD>;y=*^hU?RQj#IviFGUw@NJtoc)m?C^>Py#*9RGQ1STH;>vIw6+4QfpH3wg=+#cC{LS2t_=34xFGb7M3FH=)@ZC( z*MR#TVnaRAc3b75Hb}j|iE^xDn14VYk9%o@`AT(&Ld0&T7Lp>p2V6x946F#t+&Z@8 z5$nXGKD{*&fUR85psbInEn(DF384lZHq}-K;5fpci) zxu$`LrFQ{4igykkB|am_Vn>YVdkuQ?NlN9&E@>uHb4s84OYn*fhl%WXI6FEBY%OqQKcG4{5iYM8D!Okzf0e%PAe@h-o4a0 znn`4CoZ5>kE@R)O4s^^M*4(w5-ACrtzVN6SbVZ=|SV`PZz`q&bQ{z9fm*(;g(Zy=Cy9>f;@(rIeavPy3?y;k3lZR#M#E+ zD^{k;VzeLkRzW$*DfMa6yolMkX_QUSO^G)|u~KW;hMnF_Tl(>icvpm>H8^Q&#W~I) z1cY38x*&`LmvNzp>NS`-+jIQ~Bl(VJxVEL_5su zpmDFHp!<(v_I)fNtJgv`>@eBYT!kuIvOmoYrFuk>OXYm!@Mh_x^?n)4I(YLyxUQ~l z{C&Z>?Vs(mTZGP2OlGNR)GoIDpEGxbKu;?751u_j7IfVof-VbV&v++(LkZSVNOADi zzYA5b&VsirBu*TgSE%|w==(b~Kt$8LphA0_Z8J$gjen!*@DgT8k z407}pUz@{nwoYvN*j1FQ^?QU_+v)I==T2Ql#E*@@Rt7XN*LF&`MlbVbYj@d}(}EcO zgkj}wNR4NR!GT~9WdQx%AsXhe`u16lV~K5SHgA&<7#b}Vr_o1xpQz`qUu^%#p{2$v ze71|e9#sF~!!7l9SB&22CkHvyr5sMX2uU2nG1P%le7gX7%d}s;B2#aG(r%-0VJRuq z)7?M5zAMRXv7<60P6ls2R+%4+6GyZeLyIBW*$l|qvY@4{jqJ{)ii(4hx11{i;mGE! 
z95x8DguCbJK$vQ))vQ>B)-wkKr|7WSXLSy5n~wPKoq87|!^6{(@J{w@dI@?=hAt!3 z&+PY+>t2XKNd^nYN~D_bl3XF5HeO+GlwbVx*`vU;rF2ndNLy)b-M+ zFZ`;sLlvz+<>~H#ltw>0=bdD}zC`pNSSZ_Uo=}v#Q@37uhM3;3kDYeDmy+*KGA9dT zxqBPZWp5{ZS{CEJ2=?1eaaFxY(I?;B4=37%*)r@oHtszN!?3TUP7>OT7^ZIAJQ!{` zq%{Yb#HpUVB6Fh`g_87*aCUUEPs;BF|rU2e+`g|S#-&XgqV;V_*u?iPS` z>7A-0MMibsBXU1%ShmbnHz^-sHX-D7ySy@2sRrbrOA`7`(+1_~wmZ*{kYGiA8VsY{ zfJ+h(`d;+<(?^T z?d7;7SCqsc=-o6^Zr!{@x9HdYJPCHIV{KeFXx(@RJL?M5&TfadJnakF8s_KYl#A9s zwb#N6dSzqkvNQ>5vyYgsDkzRYXFO(YE2vuy5hoV|dcuD9Mpw2UxzR0oAzCo`xz?aOfy|F_%7Zxi`3KIdM_FYEQp9m)fEU$TmpgEPcl# zz0B6b2i%P?X&3oGNU#f<3ay0Vek{D)8hI5u8cX1qMy$5K%i@CP9WU;6$oWbh#c}gC zoZRO34BmHs%yc{y>k)jRnaf9r2UD_6-b0UD%1f2BOcK}*zMBTbfWt4&?q(xia>@O! zeY#s|-~=1wAvdR{NTyhmV!O`v_q!P)o%J%{S~%Ac#Vy;DR^4~odGR?=cq zRQw(u)o)xEkZYcH?%OHW9_uJU#!y#q=KXn&<&O-Sf17Yu>Sb+o!}ueq{`_tZ+Rif@ z&++DR3^jz2o|hg!+gl&KjvIk`>6C##`j76_h~2$spNo^KzA*Fa7=9Sj)vw{?dT;BQ zch?FU$tcD=4xwg-N5<`_?-T{BkrH8}8~m-dP2v7+DqwE->jPY*@7mj-wnVuVYQ_B~ zzu5vkwVhuQ8VOG$Q>Q2;j;$3tzE-qiyMBn7tRCY1V{++z*!T?%)cS(=)NJ0iY@CEa z{K$w6@oTB=0mQYMqRQ)3|DUG;paR}VjcOmzT&qiGS~ZQBN)a4kUHr4;L2SxYp+#~~ z&PHVJmjZ-H(HZU*LXpk1zO%NfygWjRSmfrr8r49+`&Ud&v_YfYxQB~Rw!JDKb#hYh z;?6th5a`uheEzRob6EkhG9KFNiS%<4@s%BChcsS7)GA|hP_&cLXE|mRZ}%N{{;Evv0OBT6bV?{l`_P9SQ@>W2+-G+= zaX}Srkw(0l8YEGtGd@Z1=K#6y(WBZ@aPchat}xiG!oUUfSWl`})2jt1=DWCdMeZ}g zAMMAhn8K^rHzLM=%1j0;pt_Nn`4b{Ywhela3F@eGt`$!_^)tMpcWN_F)$c0hVPa8C zrwVs(J+)>nDZuxkU+&h7JdsiTu$U}8uskKk)N3&;UnMwy8+JtMd%ZtdoH>=_$Advq zvn>~3{ei$-wM_0J%qe=|!-)CI4E_}d(ibt0YXsS*cg6nQ zpy9V?Flkjm6tN;MI9JtA`ypy&TOM8lY+d$pLX3zN1R;2!)sPKA9=pk6Yln_bGf`Zz$UYg=hj%~wTsr-S% zd%j-VwAil5YPU?1fxsK?VKCI0*F95Gsb5mF+Ca4}ZuGreg$H=;+kfg}C$i7b>Lf9~ zQJa_xY{U;OzW@{z{mHc(Q}8XtHPKg3q)Y1jdagDx9?f+s?iwfTU0Cpp%dQ{Asn*=$ zMJCxf%Z*!d4(67bm>IX!ZKQAU_AjKOWL*YOScdd_T37A&azvS;;1yRC(Y6cR>`8xD zb3Z4n)Wn{)?z>EW(S*N8I2DmuRya%4YM7MWp*;N(ESoyBw-_B}54nlGPGJ}jMn_i* zYG=f_Wp>K!tBd+bDJv#DbzSz9pwgK&LGr>R1?^9(n_ccJ-x%%6Bd6i?dBFGm3k`Cp zDZL-LvT1Kuu>0~k+6HAabGb~S`U0Ja8e=6I)U@%H_iHeEopZTWqO?!kC(ySJ^*a6KEF)jw#05CB?H7s*4vD7cg$u_Us&CvafV=o~XMBXsk{VD?pOR4= zn=_M9EjxkBF~CNT#mk>3qsY%)%ipLxF505J{7TXmj7HzZ+Kl>5a%0!SgmNDAp7Xs+ zQ@NH0Pcl4%ojfp33jgzwFPA;CDx_<;o)0 zE?Lq$nKz=`n0g=CeN7|+Medr<>CZqJevkB$wlcS+1&*a*^xC9pulHQRmz~BTP~Ib z2N!HfvNOXYE)%4L(@F1pGo4%3Y@ZCCxQ_F0?-q}@IDYPB%G-`^QXG-CeuKohTxD=X zuGb-+c(i*%P?MTNpZ}hDzo*(ttEU7mH<*Ka?&)>o{gku5uU}_SV1Z%=nvTBcZTSk! 
zNBPt<9U16nrVB1cCXD*$7TVF#Yjb0;&F2D}G-2*IqU}d0tjgfobl36R78kXK>skwS z?W!JrgZhVRT@l}%9q5+)WOGyTZ)~--4B|NF?A)H*8){vA21~B`jKj9$s20| zZ-CHkJFOGW=v5_#&QT5#+CCBs5gT(05^s@fry{4Xuo0aqFYyCO$zq~r#5UF;W?z*n zLSTGt(@_RvSHy+5Om4mOtQ?l(8s}_IZ!Yr&jsfSl!UbADoPOvuHR2WDcJx0X>_vl2 zQ;q734t2K}^24F@v1glmsKRmZrvtLJ0h&~BFv^>-qnr4|hj0mJg5>t#t_-)Ly-@uW8+#wA>j@@CU2+Q5mFO3&0|WJhAx<)&w=L&PeZ z4F4|@9y(#5Q-S@Bfpy-*Dx7l-<&zsS^74pxE&lmTJfANe`LWd6&-caV<;gFCBZ`$N zcZ9RAU!8mdAg)jEmU5gP0Ru1HH>JtWI4E}(wW+#W8 zU+==N9pERUW47Ln$My!j*uGzJUx2oCNI(@^#LzP%%vHi-!iv`fN6(QJVPWmbjB5C7 zzh!;!Qc2xn)oeC8r9E)EOrwoo^)ylaVYZmW4B5_Eu`X+NR;nk<0=;jGv;2W?nSr&1 z-?-O<*lkjGEFGb;&;gs#GQ5>Y#rs@h3iY?ugiGu&C$A+HF=n=5Zs?QXOIkfvi(HcH zrIn8~Mg>FAwM$IS#Mbkfyzq;Y(|K??@3^W{N~^u5`uxYErJ56#X=W)?dfZ54bvl~S z?wl$=rN|6O_tC*QLaRm^vae5D^eNXOsb%Aw%alzHc;cA8tnS>k)(%1ST&EM{b0%k* z29#?zFth@$OD5H~ORJqu9|2WkRj2mG%t~2VD>U~zU4rEIVxh|z=l!SJe%OdouNi|m zhi&wMT&X}OdIwU`!)1v;_!FaQHoQ|Q=MaePGEamL=&*;MJF@El0uf$V7|P!`cv>w4 zDl>Nb_at!ei08*Vu1%Z$$$cXhe(q9c4bSzZ;R!#m}DVJ?7L5gQ; z@ZQ$0QApscPY`66d(;P0T(WkXYA33OT5kkt}3i=D}SIHNb z@fBSF%Q-e|VHfm0<@v$@z7{EjnL!U1uFp}>^!0v32-U&DM(SDk!6DTnz^10qks71K z=L0!;2$%K5MB8!MxcO~*bqE!L90jE)e#URL7G`UOhm~Y+_sa!M>!05F89vi?G_^Bz zaUjLGK<`i<1@@`XqBwSqLTJU)HOLF;6x56DZR||jhwoYVI~yC0=`jRbf6E|$f?7QU z=DQIa3fu8O$KVQX3#cwS#-%s4UP7&3@HIT_84LtFy2sZL^o#+i8sVQ`x==* zANAEv3#JNSr*T4XsVR|7<&8hJ03o9meq%wyvc^VbIzfRXC2I+IN#E@7Zq<(z)Oncp z!;F--@gMYL(Wo7C(f;kdIWbtrqE)}BFUB7Q)qb0Z*_nP zdVPJm6BK7Lrf#=yK~3|f@D&4N#B9Zv$wFrnQrt`4sm5`hTL$Z*3yx3sQT;kXID4?x zxAH=o${*jy$0t0;U%(jdu;WV}kN*l!-?Hr7Y0i;<(3crol!*<^vI5hJdW^mXZM5Ky zLEH43zRnF>ed4b7LgQ8&UKooo9AN`jfz4{H~HghCLhGTl1Ez9}ws zq&R~4tU7EiIqsb(vnMXwqt<>lYpfRkSVOKD_0>GSB~Lsi&#xlKRJeHOB^;$xTjdBX z+H6Fm@c$xwb^~hqm%`(l?dJx%sO-2Ps&Lg=VA*JYWx|u;^YkG;F8nO9w|33FP`*Zv zZJ6`1o==I*Sd}5b?jL!3%=TWTeI?hQ}r)Sv+g@_ShQQI`}rlg5WuRQal#{% z#O{%tj-LtLoM8i(xi0-$X$p>)5-}JE^x}zK>1?E-#`W%|AiJaQa@hv|u*y3Cr z6IGWr*17Q!JL=ewwvn(!GUV~~pB9FZFC{5Dzpq2fGPWPWcvutAAol7fHrh<7@Ol$>@NOnjVb@9?O_+3}s!Y3`J? zpk=#q<2@9H@Z)b7UR*>7l-9{VXTc>R?z^$zBRH)bR>pfSRhF|QMeMSK;n8H77IX9W zdQ$xStuY)LIdPp3q*ObkQdCAHaI3EU-O3A_#lp7Rdw~q2*diHCeWcOq{tJ>(#YVK0 zzYaY>ly=S0WU!C8wpa-ljNO*BkkNN5q| z1RYrbO*HQTp=r@t62@tp97r42dDV(OE_1ADxFv9@XUBN5`20G{<9 z!z1;~%yMjgWxdYPx~lCvJ)37g{%K=0a74aUE9}rpt~b|#15!eTHj!@jX1i^%@;du!|ReJ>7C7Yo|DH{#+nJ9@BQ{ zoHyic?ajO=GoS#2X_BwO1?^)K4*pR(j=IBXziLU(rvKRcDgG)uNNx>7kGbgcAz)~6Hq=} zIKaUAuGkOS`sZn z9V0CaFB2aU`YlaN&Ym6VqHF|R0fvwnwxxSW0QsVlz|=b{TQLG0+t)S6M!cvUi8A?O zCkkZTBag2v|D@T^h0|IthHsm!kLEEzp;#rQ`<9jMdvQ@IoN(e?>%!XFYA+m3x$4S} ztFT`Lv1akE?=|V47v9a)i|e4+kUcxa#fgYbAKYd>ji?MN9YWraM-w32~9aF4G7OiED*RYu`FrP_@vl zD)e?j$SKwjHHEkildHYyY-)-BB@p5p*x;DclPDFgm=*Ys3zu+6r~?7$6+eUrb<-^m16c~BVlYt+9f-`8Rrvoi5=>D2IX9%=22Po3$H}<(xKrkKX*^&R7z^) z<=yaG`<|2G7Y_ennT@@GsdS48*lIR|%{xo9mgT+6;cXuaKrCs~8pI7XHcM^b05H%! 
[base85-encoded GIT binary patch payload omitted: binary delta data, not human-readable]
z&;Pij1fuZ&$_}&PV`JmjF070VfV?8np|F2Cix~`rfm8%`FmimB=ruu*SnV%kK8z5^ z?edvEU|*kh%F|PmTjmRbeFM8*gAv}lN!Hs19GdqDP_Ow!NFx9GrgK37aNN89KaLx1 zJgg{nk*E3j+2kt-*z!4F)3ydl{FZU4NSH~7vuB2TA_WX!E)TLwN{}dedV+ucazKoY zXB-;zgL zEKUO%C@7%mq}=n1h=}~QYV&;WI{qoDX-zI|$WNkO2>}x;Vf8?rUvE3YE=#>>KB6V# zQm^gwP-!C9t~s9}<$x4w2*M1x2mjy~gaBG3#AN2^lN5H_9i!a2&uKE-u505r{fAdVc5GmMN$7+lM6%Rc@UA6b~Wp` zi`r_H6d$yi+5=}DzMTKt=V8(PD|Zdb!VRBNYr?}1hY3!Ejt2Fasc{d&*oVitagWVZ z&COKDcf+NXcF6Y+eDBgO+7D$OYfJ5kU|mP)JBd|cERG7WY9n4^e^D$*qY`_Oluw}H z9tM5$A|n4e-lA{+`M2?L7Vyb?S{WoZ_8~C0#K_X;Q%p6-#UlaW` z@4NyYx52TboAJuHOoiuPSa{5X5T}qDNk(Mu|7%pT}w zc7?MGT?lotpL4LZ2+HzTI++l6c14sOsNX~6&kS;0Jr}kmLln}!EoQ7Lu4IM^d1ds9 z2<+Y3VC4j5o~La|o4>MGA-qnys&Q3qQ_n4#jJ~!Ty~#CB#J}k4QzT$K>1g6*op$iC zl1Va8tJ*{K5%_%m9(KG`HCmEnFrQi2$#@AETTby25zlQdnfiW<>!X7lPQu5(>_~O) zvtBO#iB*mQ;e7&le*5E8Z`$(4@FdWpi;k>78{7F?#IwGPAyf>406jmr3ac`;6L^MY zL|_(6QQwTE1MBU2)};a8*3?E{__H*Q)Z`>J>E~4vR`keTD91p}uh3I=Bj=_bSg9Ff zV(95(8)r-(^vZLJXwy<5cC|tC;&Bz~74<=VIEiM~O;1w0s~4JE-6vi+qbl{9?(6nj z;>(lw!DWgwjaku)gl|GZ|26CaGKhcuM{ct!2R(maHxp*Xbd-%^J!DaDFnj8!`8h2l z1aUayArzo~$BN5){C@`3?z-|M{nOsHAmK;cXP44INZR>B_Y2@3_T&Fd_Hjnb7I&)J z(kk55_m^7#FBjH2TLX zOjPSn(g$-X+o0dl%HC0J`2Pw}(`A9isyBwF{|ZW{5il_^SC^Zf{eWXJA0POg8Vy0N zBXB28a2*{T&Z}ACTDGz}{(tuqLeOGiy8L~vLgpjkPM#|Z=c==T>>cuKr7puz0lDnCpmm-k3K*!W% zh@sOLV=7t<(#ILHOnxd(ARf7fys3sZLj51k9_RgcpiOTnj)U&aFwupURlVs*5JN0)# zY1$1{YCn64oP4^Bl3DuZ)!*v7*D^e@$X*@`T4W*rMi1@gNwtAzWG)(1ZNmuo*qd|y z$flErEKXe~e0cG6VQ33^?{e{LV;wE6utLT*e|k+egF&(g&Lx)m!r8r>fizRoh6nz7 zgTn@!hRrJ4WUhc0&sQ+F7rU=A&oI%y$G-9sa~|3T#2qiuWR(VF4F)T#Y1H;DO1qx* zW$^qZjJ;A(AR60;o;NA2`EX9}1PvpJq^UIz5B`)*QGYBJ`kGme)~J|;*KzKJ)pYMR zf{b!{wWhN`+u+vvvsBE&mm%Xz-GLg^&XFX{e&|9n%+iuoG_?aS^`w@xG@z1o{yGa8 zO*aR~X;8XuH6HlR;^^NQeCELf?z~kgsh<1`l?L3$ts9Q1){_GqM`0`}p$=_*#dMw< z(}iDa%_!2H_i_D$f|@Sxmpv_&lYKjhmckS2-Bokr{wt{JQ+;lel$_k?qY*(P2$s{8*_RE|3y_bu)OUaU041EJ%aZPbDbNg z!PuCJZ&`nv9>hwteer0x4#Z>9Ti0V~X=T^_adOj9i=J>2(Bh(IeMC>vo`FW6WzuFx zZGVKr(ln9?1fOWx<5vt>n$)~Dp|swO0~Gy?r9xxZKEV+={k5|Rx#IHTSpA@3W*&Y< zJQ`jGX*?iC|y6S6%h%RZq`5K8A`#O_`Aa=xhN43aB^TC1F0|Rj;~$6S>Q4 zuLl`AiD3d08_6b51%-oN;3m87CzS!e$>mDgJOy)mXBt;}kCN&FK0td)ucPJ0yIHcs zLC3(bxhQ>cB)MElprQNNo{b{!12FM~&bv{njY0L=_9&(gPHdJ&E@*=+USj?y*mo|< z_cvo?9{c?ZyVfn_<1)}*FJZ6Tw@gGZ&nMv42*kUHdUhTy@`9j$O#|w5Sa91Y4z2~ z{b^S3#EiS(jPd6FOQ_RxSbK>RYWOcw`_L=h7V{YETJmoS&?=$WQFwj%Bp_MJfesO(=x*+Qrl_}ZkKzNj3^UQR`{Caje>t3 zsh*zA9g@u`B6CA_;9thxN?BM^(!DPF_Uw-rQQto?>bHuonu3Cl?)GCCWDx`k?>z@_ zLh&@s%~@XP@!5rhCpIUI^p~m)bN_A6;7==Q=NARTDc07d#jcvg z$x4vR&-sm5oO^iV8%s*Jtl{hb(M!wP8nc%wAhC6H`SX6;&(di2%KKv1E_!&z7nStl z*!)kB$}ycd@emWH;OmSaD}h@=t8jJ4Ioqst|9{5x{Ot?V&DRB{XJ0Pnv(cxE8O02* zie5L9$^+S%HJlI4EeW0h8qcX|3~Nzav%)Sm4%S-0PI|{^8aLBcR3GYx5A!r@<0ol* zwh<2}EqG*N;oTGo+7o}tWX7wYQNxQc&IAgtOxRz!XdNJWzT^4~?tj~aCH%R{chk`q z>Lpea_WNoqZ@kts?2u0wm^RT!AuvmTrKKcRtA>hqoT(81 zn|VsW0<=mi=Z^?AtWa1W7s>NA(@eyrWRx$|_6uBy%^8qo$r?;FZlk6rgT#58RoVCx z2gtS0o1DTxUf%-$2Y#tJXKbxL0dUPh!-#JYKW+Iupb`-LTFCJ#ix;u{ek+An)X?CZ z?-=Q+ySU=eDb#h^--QC)wt?xi|-r!kPLB6xdf<5F_f8yGp^ClDKVWe1or9I?9uSoBm)d@4r1$_&ja#Lj-kLS}EC zTUO6wXgo-tpSND8Y1!Q_=r>VlXJTj~i82El$mb5uGQbBp9)(nH^qwfusJF2n&&U0G zbFocA53C|B#~{EkI8LcEfYZ5MQI6N+$Z97a8SKpBBAiO8R6Vz`HPZ(Zoon&Wk z$5X_ozh>>(YY6lXS)2@^p+G&pV_O85`LG_NEnH4j1Xz$ z_vN47fKm2u=$cCO!7<`SMikFl&G0wNS47?2_CnSI{4lpcUE=e4HReQt1qI&@2N}dh z%k-mg`F6)i;Byrxoi-DFI4(-6Zv?ViZ1|mTv>&;=aNUbQh(iJbr*B~Iv!ub_T_RL} zm1^3CIbYr|it4OM#l~SJ@qvCyN~VW3PS4DYkCDZm2-u$&mO9DoLnb2 zn_RKuf?DTkf>B7_{;&a2WP!DYEfb_N53Fi2fnO>zBhJr3e5*KwN}n6w)C1EXyUn0k zhiMKj5o^4xDkNB2LwnC-2oJiE5E1as(W$fE+}Xj~!@^a+5abFnL7z=k^4Zxo{w&7G1WfkL?62R(?eMwuZ1CK*!HRc~0d 
zwe~CL&Dpd)3_P<;LIEleemivHC1zi7k z=AmD{ehIl{Xx*LcA!>VKBRYuT@9c`;^=Y_GMjxgzWtY%hbUOhNX>+Cfg&&CrkH?KE zckf+GoF@#gNc@m3?TjCBfk!M869OxiXgrWTYn|f)!3){|GU1-U+s?)7!z1_~U$L0D`;{$$$jHfI?sLo?RVmRy_C4x5siEE7IoQ&(>MSP2 zR8(+%1YW*84tQHLSLfTM;>19N!~W9W5p6-C$j9^k1s~%VVI>sdv}XMpiVB4=`xTP$ zZr$?CdYoit4-61FYuLJsovYBdLNWfc%=6Sf?RBJAWj7MEi}fAYt?{&K6G&cx;S&Z&v^Kb*Pc~aE#Np?={Xc3JLl5oE3gWYPhy|`AYrGKy|h^9xXjERJ!ye=0Pe1F z`fDxeI~Ff4qL>J79YT&1y>))3j=zkc5JyE$DdNou$}_e!SH8Il^RkXy$0 zco?zAXM}b1=&l7|Vdqy_>vMUv6 z+4KwJ-^PCxPnwoL=THdH_6$0sS69B@l zU6z0y{pWho5v{#lZnv%X=MVH~Nt(-nNsYiQYkBgrC(9lq7^QJoaf$p^48TctOxCC0 z-0$x46Q7o37H?L&b}7|CT(+CCQyw>q6k8$D)@z~(i3Vz=T0ei`&17e0wg<1Q!4Z*q zSm~1w4dC!U-?m)V?*+GdJdk|+2v0pQ@U`CYU2iy^)$;e2=Xu)x!NJa~4a!x240(}< zYXX_xV_=D9g~tOM%p2IGDsf)7>u7d^^Y8RQr3fX<>VfsxTi;A{-Y~pJ3t$QKC!_n4 zeCXN^49%O^7C{G6T65^q(b2KgA=*87WAgBFG&stJ61H~$i+lsn4Gu(ZCMPmk2Xxc` zgXaj8I*V~Z5&Y+$M>(&GpA5HW`WJt9n#IQLtoFSz(SP(uc+YEGf>70ZoXCEIL#J7V zOoe}ClnvRFrWRl+*ug+|8!A*!p-TajhqC(YB-?QT#2Nik^#H)4V?^fF4v>#lzE{|t zSK4J*RHS*bmHxy=jg&_AItR}8;w}Vh3oDUjhiY?YYpsHelJ%fVajBI?QP=qx*MBG| z@5O0N^lPI#XP(y;g&g12=P+LTR5Va>mRq0pO{O{6@*_Qc#~>E%>G6WqT4WTN&CJ}6 z1j06*%)>2NqiSA~o4Puu*e_f^40#@0w5X_ti|~c zB~L2Hka3TTUtke*lWg4=sD|BW>8-ta4)^N;s;^a>j4*I;Srrwil}wc!Coc>F^~by# zTI+NAx^+67X?N5;x9%>4u)S~A?`@bSX_JM(wCvD0ve4KhK|&vt!oG~&Hp}}D&*#y4E6hVpMrEC9Sr0>k*a3>4Zzd3cLLEr#92AEogjXzijybV>;aS7Qa zOOB}hfUtDr66frwgB*YKv1@umpQ*+U9{MpA2L#!Sy*JesZ7W1-kHQaalInsDTu_;| zoOJ(Nlf}C2_K_v*Jttx@#Z;YeUwE{O8xxS>_x2IBMkGycIPMB|p`xLYRMlY?4p2`@ z*MHqSFxoy@K__Vv5;?8xBZ_DUx49Kf50qW5L1SNZe(QR@Pr)s%==}Fu0IOchPgSaJ z$7T^)^x(&U;B~i?6kqGc?a(p)(96p-j+@7quJNp{3XcUI8f#&Wgr7eBmL?W@owjl@qopuXl9*+_wQFrj zKF$H8CEyd}(s?HHUZk8Ha=bU?XyBscMwA=bZJ)!mU zvV^~aZA*dY?y_33k!~V@=_*}XVb@i@q*2icV%|7+b}nreH5 z=0%S|{h)YZEH%VQWxp5l5_6dTey^i%jT0uv;WKHjsD1^x1*_GKW&~6~*_D1LZj?6* zP*h0E+I(cX*I3KNN$>i_zU0d^*$Ui}Y-DtW!>AG5N9fYujIAJexU;kKXy|Mh3ZY5i zbyHGhvKA;4T`R%Jp`mAkLyQJ%zL;Trw`@q<NW&RtPeJ%Ng z2MS-9+mnge0 zjFs1(TZ(#Hnz4Lf)#D_sUZ!s3wGtt#&`qOBdgt}gg3SeeeaPqVzA&q{j{GS;Y)+5g zQ4RNk*16f}OcP9bhx(a@$ac=yKB!TRG!81q^De;J8BW*KCU#(a4i=X$Pvt4H<%@2| zUgJjm)mOD5HVT!7qdZ$x<{-!EbQf0OoSHr$LB3~NN0MxJ0epKhaNYF*cJotj%4}=y zdopqS2;W7R*o44oy^_eV`NP!Q{!wlJXTMHa+JalTwCi>)ij0^a(QIEB`Cdo|L*@-0 zZw=7ik7F$;7Uz=N)#7W?KRI1g&^l2}GPid1S9)yOMM{qYH^c38^^n~F!O74&LFiJT z&{AKEpt*LB2~rLAH5u~($htFNIZMR2c8WqcHNRoDb|U{M0pYLF5xUyLqSAb!uUmVm z8D8BJefXgi($BuFcN6CWz|FnwH4%KX!n^xt<#JC%&-ZprWuSX5Bto9|j663;5L#(( zvW;D*%DGl|q$*e1#?Ns3a&G}gKIGLh$S8DR^%ok{W7OrAtmLwPU%K-={1ST(+=*_q zN48aFAQG>{Gd=DY|3mEF824F47X?Nik#xdyKu$U@)Aw;ym^VM~Hcmf6Yq-vV^AXkG zVC^pPRm>&rtNI9Lna4?36Oxbf0MZ@VsBO!xW$h}hUo<3~ z3}V`v=wq^8%{1n3-Uea0g5v<4C9;CdQ7{WR)D95i($GE)3+l?dKa8+T%*6&Q{%|=3 zdpY46imN2m_zm`*qCIW0gbkc?3YNp7sVOUe8_@OAZ*2~oO^x=8{!OebF{Z_m(_Ebm z`da1ak;|*BhEIhOb6|pj4+jucH|=w0GU(Rfz)~f<)I$%eby_3TQHDVB8>;lz46b=o!HV56cgb>vMvR^CO8VPb1R%ebcOy-1zD2*1u1xWfR9% zm^p77SShsbEXaRebohi1PtDglkK>VR!yys*J52BpYbkQ<8lrVwAJGUGOrA#*t}>g? 
z;rv5xq2bZLIDR?#hE3#Qg8(I<{BJ#{zWKbUiveYiJ7zIW;o@Wkxx< z^%WAJ3W|)yCV@aiyX4QM)xpFRQ`u1~N5f#C$9y~Q$xj9I00NKkGL@n@t#bMP8(1TxQokOXW3YJ?DvBt2k zjw1q4aQ*1ho4?;#Aq+$lhQL!FREPj6e3Ipbu@aFuVIQdd+6^9A0OZrBaD*C$AEn<1 zEN99GF|e>wBG-YPTRKbF%5HOe`zLfNBI~Nu>|`q37U|g3IKL}&6p;|*v$#%H!gAG0 zQE_)Lm&9fR5+VDi>6w7$R*J&)(Bx*|K?@sI%&nU}H_q=y=d$PqXxQ7PZCQYFWE`Fo zg#!5efYKz(1dxEXuWV}n27~RjoA=4RncG{n;+^DdpWfNYF5jgkXHUMZw3@SfV6e6N z&>GYX^jA{BEIG&|GFv_2=DG%iu8unNz3VC0*KL6KOCjnx^zr-zDO@qXI-sBsMc;)< zVl2czf?%{k<_NX-sw=Zd@|TwmK>=j3LZ_mplF!tP5hUhz8Grlm^pG}*-_R(c)Hk%V zL-P<$wcs#1IvSUpY$0cH%FSIp2Lp3W+ciD<{PdhNyY=HQ4?|HYb;DDaT7t5E|H1Ot zk+R=QSHI(p%sp9$uD{=QxtfpO~X%=W^N)4Q9pO^k^uG!kQ+frF&5@Uc; zhQRk39mJr`0kDq)CsjQH)nGgc$uKbifycn4{D1DxCK{b&Z9v^jf#Zhuxdz@33LRYN zhTQ&5A12&Ig{;EM7p$N>8o#?oc9Qo;e_5z_z~C>Y9sH;Ifywc*5p&D8jt^ZgE^y9t z^uj-ReBv|Hn=98|bYT7}+&i-|Q!_g1O`xxibBhn-rLm6uS=0Ty#$bQGH<4NH?|8ME z`T&A%ji%XF&j5YA8VasN#@q{#w+ZR9mi;;5sJXZFD?`fDzMs;_hNhqG239&U6a5}s zbt-GDel0}#Yd|7zv7iKNGDf($AKndEe)t2$i^CG_|5Y9@0#P3@)^SGrSVN3%O9_B} zD2K@JVgbuwWk&n&5vs38jCG82?!v=V&oYU^A-8n+V4QE`KcvD-ly+lnLTTTRF?s<2tgB#8kew4CEdB9^?Rz&3H1dzbebt@* zD_R8&)gVKniX$j3`k8Mw5?o1M8AzBV#hodEL?Pnp2D!#C$ewl}7R7xmy{%#9Lw@$p zez~1XiHhcRhP$wVTSILYfz}GDFj<7P1eqbh+j)%7Y!+`pshFxMUAitX!gFY0D3=?9)G+|Q2|r5` z4V56dt}!0_1dK@9NUkg#7bLWGsxgcu?d{9Tc?h;70=YtMaT9F}2 zIAe`(2tL>F0W!a&5Zl{hm6n^$=AC_?^djkb!ja;U)MA%UU*`1sH8!kVfu{{4b+337k0W$CP zrtQ;lmt4Rg0*vRC^o|FXG~%fV8t3z*>QckpH|CoL_>r^!oT8UUb-bRmua1nwghA-A zf#hFuF!{xP7CNbe4y$hQeHTTlgmWjeY+Jw<;JM zZ=wDgzx7>84c$I>M^;SqK?OCF!n0Xv{ablz(>k)PI?iTz%CtF>yAu(G)o-3!jj%AL zCm9Et3AqPRJ5(&!dPRqeujTwm80w;W{}odqOJO@ypT)cmRrtjQBd-l6udW@>qHA18 z!5IcN11BQ1ESLSypx#n0~B#vqdoutz0}^pX5G&r2M)rd)eTFclgT<;3b}K!zh=a&SsB|7 zwZFWOI(!n}9G>0>@2|ZNx5_VO@MuoRCOGiiAu``ACn^Sy&-8J)NaMczc{QOL-Z^7t zhZLF_Y<~NZy9Oj{d02i{O+As4F*_WZ@H1IY)2|R=ElS+0no~@5_}aus{q0848t{|n z3%e8ByfCBmE3$Ka?Zxhh4i%>g^Fwfs0uA8l-&ydl+kc?L&ZwCK0NffSaR9tH3Hhch2c zDXvq45)72Mt`qz3oZzTP-$`$TpBkXU$A zpKIs<_G2`7AP7Bw%867)r|h4c8EX{>SleKNS8r&p4&|3^hstk8B?j{hTiSQG0KkK~ z)JXxe%1%Yz>7e$-Q9c1>(83*xi}f!rUl%CE)$w9kLdufc$J%gl@@vKK87qoknfOLh zjL*-2MGYO%G}?{lfu)_;6D|D5&9>C5o$eK3iuxP+=5KDOUxTg$|EfTc<6@Piw$Hc< zr{>d2md6`RC4+{*+a(l$a~*kK*APRatOn_TU$f~l1x)f}+Ss6#W8r3mPFLnXl%5!l zZ~U}aR=&lE)xAB55!#{w$)pf%=(d$pl^FUD2Rx|{SKC&H4vydC-gV{4i8r|n>B$PI zDn|q{L)_PhzdDp-$LXws;N@k>`Qhm~MMf91`VFF!^Kk9% zUqm6fKryJv?7lUBZ*6op?i=&xN5ya;=cp;oQqLLKtM;DgU|UD%1hK$E%$^Jt+$ZXAl^S0cZT)7e);>l^YwuxHFYJOGTuE7x_;or zf>@BrIE{NerMP~@nt;A*%HvI4O#S6(Jdda{9H~I%+80H&AudgC_ZdPEssQKQZ`+Gs zqwedCtoBS2?il#|J-rfNJOyJJgELo!r052RS0b+Z*@neKEjD)o_4PT=s?{CZ$$r)On%?<{m%dHK?v*BYS=rCn>Vpb{HWM5kwVW*z`ED#(RJNXk)x{b*w}~6 z?G?|+q(FbhuGnDifgBa%^ks3nuD?rv;D9(yCu}|5J^wv+`;f=>E2Q?+B9menWXNds z!q=O5&7LOv;41z&Ps4{%^&vH^G6tmyp`zFjxtMU)1qzBH>$WSG;=cfO* zW_ml&*s~{ZhpwW_I9DNTg}onJviyu-p}m?ovQVJb$O1CksFq9o#Kk-9X7Hec9=CUn z{)5#0WNAJgZpFi}^D;W)nkuO!b&7?#UEjZtw}W2C8DzSc#aMy@WKdRH-g4nDaC`|z z!{d0VP|ie=;YOSszU=;`LR(b$Md2}@A+!%J?@Ds8`Q<=?{|F@54Y!iQhg}U`D9zSx z1$`qgY>N&RH1p&AL1$d5(p~oV{7YVg^S3XKY*6ext#f~~>9Mc4#oec1gPtYDEzsWF z1<>I#*NK1}R*|spdyG}GLS;-dpBCQ+2Xkat+>(8MMV-nPik}j>%eJS^HVH(Gd4^0v>^#@H@cVf&SeU<}jvJRDV`7SOPEo>}F|Ymb^xwOA{Q3B+Nda-4?+F&L$MkI54pmG2F35ra#4XL%BJ|Ck7E2CG zd$YFGb6?R~Cow=lI?=bkq{*`tfh&Oh-xa92=)6Iuu08DTxR>`cIXQ-NM~ZrQkdqtT z$mE&5^T>od@4%;^pm4q1mKYNeBd7#2HkzGUz}ID}03{3Yq)6TDX|-e##n39^M5#l| zOMF(&0tyRSBm?ZRKU1gZ(U|d{T`n{db7>SCiO+oF4Ja(wHWFr4a-W~B$|M_#tx7U&FMz=O&o<`D&IO?7P!f|R0hiE)-FXDIyAc>2^v5 z;%VaO|vRQvpxUSf(g1-|nzTxyD~l4*RQ77yNM@QCx-7=b56UhBml90G>akr^S? 
zI?$->LU~e6r~8K9u%#1DZrkgm@b+xqcXdqg$U%(SEX6D*foa#eMBPt6M{3E}{b7r# zS{rzVOy(T9!->RC>$#@$@CJlw`mas!4vA&%F?EoG{J3CuZ9-zn6BRdL9mvf=pYOG9 z6CkT$irGn1C=n5x1tjUACxgh+iitW#Lbu|@aNveCrR)hF-rg!H4ew8zJtBNJ>Dy0y zay2ym!3#vlf!(SpD!iACoC_ZhNtAV}h-x9rd4_cF_pJs#AYZBKU=ZhjEn_RG9XY-~ zX1AFd=LtU@Y%taGeKv&B@29C#xkl)0Zi z`TYD>B-vV0G4Hs0l;UEoc0R%C&22z!XmYd%Mr}Q^fdAE)n2RywUMJ5x>Nm6+`S%gg z<-)_dL{>*#q)voc$jJbck#Fo(3|?5piSi=fox%`Z2pGj67XUN3#}>1qpT|6Aa<0n- z|KUOhTwQ%V9%d~ex1(UOTqd)1uc)Y)*crJCBA{8a*>Dsqv z-dS4JgF{r(4zW>eX}97NZ+tScI{n;o|GR!WS_KLNaPwazc88=%p}%itwBgL!qIb+F z3qwkTNFPCo0}*TXIYa;@iun|~#g7X{;; zDp(#)v=o_lcA`_h6jnHucxJi(p&8b>>%T<|nftg7Gg-WigLW$NrV!6udE<5v=|l;u z_O2Ko)(!*_@5MKEV&E72cA_UvT+qVCmSP0yYP0&pQuntvPtD(a8-1u$T1bmLVo>DQ z3^_VzYf>+0g`#?E4q4_wLZU@n_sssvi|vVM0-H6pwRX#xU*=#ZeUzV~%-=R`_<-_d zO9w#3=X;y%fU)xb4kcJj6TaUqgmJx?^dy+0fHp;n5S9sTl!8)CNZy?kco&trrUb^S zRQ{4_W@a4jza+rhk&Pd&Y1precy%ZJ-rr+Lxm8flAgb;~Cd`H|0A1 z_4bvE_hlZG2byq(@a(@E=!)OJenfS@AT*n)_?nj&+TjPE=MPDCw;`60mmQv$_f^#$ z?Ff~-AWqQzf^W0+#+9@q8SeC9m+DJfeVfbX>T>Ek-+{BZubGSEBu{8GFT2B)g-1l= zYXdxvDD<}+t>;zS<3EyPnSAK%?9nwW7u_!KAS*UgY>SqbB@$-RfLIO@`XqN(b#nS$ zUhw^OAK~oC!piI2Np+3eu8+3St$%H;wIvHPGv!z3ovtWUX9kGYRv2D!B1P61Qc7;^ z`Dq~Zi5%OO?I3<-vp@WP&evK*w9)(7QYMEse0AI;|EJnA0bslj@OdkhVx*iN`p9b8 zB_0{)5wwb{Fuy?lPQw4)CugR!E|LySZ-#JC25j(Sfpiq>p=JYJtWU+oUXxA9V&r%zm&%!*QB4(w zE9LA=vI5UTW0Y@8+B7RV_dZe9D41^!WQ`Qi+tR*p-)Gc4by!q(DNd-)L65z$CtoIG z_2*+h<{S`O??HkT3d4mt|86S7gr23RmqTx{DkC2lW2%016d)4Lk#?mH-`*JyjGc`@ z1@oyiKdF*Vfe0h$tFwxY>nqEQwc5jA8ho7~+7S+kM!QAud3w^4W+3(Y2i(`mh045Z zzWNsq+jXY6Dni&n9Jr4fLx2}QGr6Qzz=4%<(38|)9s7kn`et)E?{(7{_#1MQo+(Gn z@ye7D9LZ5k_4cBQ=U&YvX!m!^K{^(|M{d}QJiGn9;92?NrQ>oOrQ*_o zVpuG9pptoY-|#nXt7=OompxO;?q~$Ayjn|CWB^NZ($&{}*}K>O(E|MbjSY3}+*AF_ zkYgcUglA&@GX&D14w8!|6j-efWW8bme8IB=U1QU1ITk5`_aVODUIy^M=se5$RJm1O zk&#Nz-dwO3-s_NCxms=SFLQ$QVe|6xnC@2WfS^x0Y%@mV>4il%Dk>_&Wh&xa`AzRb z0X-7HC3&+kzj%u$C4dWkcfsH#F6&C1(H4t%PC-@c#5dW#rcLMZELP?uayiuA|8pac z4SU<7Yba|t$fK6-&WSx>*?6mCO&tN`*Q%=_qlP}~{8&6q|IXxoa7K-NIuhtme)Qe* z=OeUY)G2L8KV*>Za7th2T@u7^_gIha29)TOzul7Ya)&=`30zEl{H)zV*h;h(8DpA! 
z;>87_z>O&H)4t@c&Kue-ZPEU^5$Q+Q`Y0Z71y*0b@9=`5A{^~~tue4BjcK-!A#YhqV1tRvDp5eL>(qS&ddiDAvki_UMHwtU)6 zKJXrK8R{ve64Gt)GIU{RS()TnZww0&NdcdRi4x(^ncr)X*F5ACF1pX{goDi;3(~PF zrux}^vOW-1<@oVzpXfwq+X*;mFeGpFf#=u)6necU70n(2^>t@a+*uG1-PR3|ofk3D zBU$Rlo7P^tknJAJGc=q~44n-JUe%5MzB8gh?vcmqNH@ZXQfe4weFto6Y|b!rCTK%h zYq2HAd*FJ8^3IQ;FJU=KIoBlzo>M1P5rWP4Xy0}{f(UdqY33VP(ddfPlLsg=e$ znAyCXf&51NTLOifC((%dG?#It(SNpwcPr9m+klo~tGtu!rG^fiD1OkNihVF2c523IwjQtImEDmGskT5D?n#qnyG(&+s>tA2>$a)H*du<2q2 z&figL`*A}oX*4^Ux2U4p=CK`#PUApL!)SI!Gp9>}%A)X`zM2jAlI*r^X6@7s?cfw6 z3FckDgbnQZ0jCm3Wqp``aj&Rxt(h_P|AOO&A-dBzwrosNN%Uug_u<1}em>cq+= z)Q$7bM1~69T*s&ErsM-^3&H#Ab{jSx3#7vZI-QjTwisVnM+>5Q*mV&oaPUW%N#^p~ zua&-@AbLF;eMK_qCLsaT`O}w1wIsz%eE)Q+P5-8|m$<~lFugLTjt;HvE-w#t^+QUN z+hzNYZxb2(FlsA|U@+JWYw{L%P3noe#gwY^ImDJFE{*{igcz=Z*)cE-HGBo4UbhKe=;gN2Uce|5ka>;beiVl#xKV5%TYe1r;74 zlefZ~&ubbe?1U%TpGfCei+Y=3{F}bi6Ip)3>|($XQK_WXFNs_n-sVEc2;ZyzAXeIS z@_kglmK(X~^_uvm58kX2-l@_I&Z;V*blOt_ULC($xvSZ0S+z$8q9!|rCzs?Z1>18= z#KIIJJo^(5EaD1Z3}{B!irDT8p`wjyMeeO2N5ItG&DC1+Ab|)8jkzW*oYLtNCqmm} z@vEyu1L0ndWoUkMQMqPkfQfr==0Ty&9s~$t-Oevnot5tgD8|=Z;ajoz9+{_28dwfu z=@!DVu&Q=W?EK!h&A&0XP0{YpGp8{&pIa(1wEuhU7QuY;FwT{u!$V9VAt)3=haqeP+3G{VS z8CH#Uvdjkwz6&Ho;f-re!w(?7vDbwixj@_#jD42ip^V$lOY0%3k3HX+>=kz7zI+*N z=bTY54z8-uz%a@8dFwhH3EaDTq?#()s*rNrFrAUVbw~&k^P{wSKwoz?3bXt)zMdzv zUItnI+L=X`1F`OoP$1-z4*?435yGc zFQvb8XzSs8IZ8oiW?~~I`2J7`ri!gV{9<7|U3s0iwDEq^r)@ceSW_XF{8p>4cVhcb z7(kXV&%a z5k*D%DxSF0k4aU>!-hGG_Ie|tO7QsQlv2djAMM&o1(o71PZvmM=;GaJp=g}NEOcAe zKE>u_{`X*5Fv5PCyjs$rdy3WzK}9Z98>HcEq~)Bm(16wGdQ;t!v_R#x$P}0aUIzx- zcBmm4awruwR_l%kLkMIdVR1LA;#JnTiEr>9Ty4vP0~-dv3>w_2c8w?eiJkMx;gBWtD-s%V>mkA8`Y%{lfb2b5isD<6NNl{iBme{c*TQtGyvm>$6Z|3J0b zyDgsbDx)3fa4c1=IHmnyKWc%k9HoZnq2w<|UQpj;k#0ed;6EFPdYP=z1OLp3nUD39 zGpNi_CRA^B;ei|nU52ice|s&O399`n@viwbeABf7_sCbP^e(|HaLzDMvk~s620uQh^S$ybrK8J(8NNR#6SE@fmJA67_-g#l`dz1;ftDfbdS_AYy(kj39U zjZqYSg&!5k9TNPN=cit)Bg%*~;&ptj0^1>;ys|Ha+nA%j!;kk0Fb2|oHyhQgwa;is z(QY94S>4@;sM7XCDd_T;f{vg#$zM=TN+)}KNc2RB<)QYQB&1A_i^H#6+%fT3*0@$s zC#+aqNdm@bt7p7H+j&8Q0nSERLFx~1MGc1N#>Jt@Sf?N;_!n z&>Jlc)T6;BwFTDGld9B3F|WV6UA2GII6v?*J|M?EX#ZeFhpEFotAk;Wa9<9*^&(!U zzS7Sl^t+GYjrqPXeoOMZ>n(GEhRDE}elU@ftFcCq=mg!A;s7wYSB;RZWcaqUFH8wG z!|$i*`nUE^ioiLePg4j8jvE@9-TRCcNTWSeo-f933NOscZmVMqdHd^6cD*CZ@7q45|X@usX=mhj{z@ut`^JnRayfCGngI$8W-Dr{;P<$L+oar`;)M@F&9X2~Zn#$%*)0 z#oSC{QgU(#@q%|qKt;WUWTUmFdm8t|naf7u>1U{Df`JjtZ1lIZALAAi{VU3A{8o6# z=bJI#AvXq1Cse(}#U!?^h%CChg$_4;K5V$1$VRr9dGC%qiQEpw+^ zmSEv_ISOC3?us}4QR0n__Zd?9whd#2y{Sj}ZK6M@W~v5P*U3gCz=n!_h`7D(!&@eJ zJE@H*FcYfY1;6CA&IFVFwG(-k*b4#Vez#{@dMd2#g#3CitX#qOE3&7u^728CiW8&0 zt8C&eszX);YTZrM#<1G#-Ja@Kf3-?o=uHZnHgODr!n+BcMk1np1hK=<4R+r2Mp0SU z#v-vT`-p$A#%J)apv(1dq|4$$eX$X3zOh7@=}q=1p{}`ekLBzzBDI5046>pOXKHIR6WsPH$? 
z8f3^)K5b?f37VLYv2M9ptIAO#6gA=fEPhI&%;6yYk{-(}2BEa6k9>J(S$HRKtCj~Q zLN5!d1j+tPwDjj)sKksgP`HxH)Ly>NI89glpzE(UStaxBfGZ;LuN#pr-7hf}%Uu%$ zV$JW#RUx!nckn8G!j2xBUeryWrTsF5v=mY0@r}ZwkSzX$b2pE}OBlD&-HXkQ9|iWW z+c?aZCgbiFTv}#4@<^8Ic6c0RIoK1lGJb|DJu9!yZQEUkcaoKtxrhD%OZU$9y zi;nq7Xo&H3)Gg#(Mr^F~(?&M0y5hX$f&Rc!eMPWo|IHKd1^#0Cw)zA`Nyo~S^NVs+ zK}x5j4zoOA5wDgb+_?(>4fO2dJhsz#^MRg=aBDAL|91k1NVg>Zv(0* zGA1N(RvLqJHVg4KW8zPEywF)XIt@afdI4K7V3~6N;kJr6EGem8zmNY3pC1yP3M8*` zX6IS!iR-D^@Ow(XkE`^n;Bz#g-OCbl#pAwDirpsE;c_`gll5|B<7RbFe$2S2!`=H1 zA|kW>pP!LLtXLj*FCjLqmD&MaJ|2bVn2DD|(}p!6QX9lR4@8Co(fM`F^m3WpVNpKs zQAe!j`837NiXSa1kKtdg*Uc?AYLHn?_ygqD-KkX^MH)*&ZOa_9#nbOH?N%ct^h|Z@ zCkx)%A$RRB_mk8)S<4^B!^XFq?F=@59Do+R@$n8jEn89XWe2C0O5d!ePTbG(Z{B({ zP8apmQZyH<8eSXWzq$X{uVYXA&et@$W=s3ZwA1%?nAD=bkT?%`&Dzb5H`}7Ani9VE z-ERUit?cbwTu8^tkwy&$WCsU(rK|h9ba+R$(Y=+;jw^LreivT0ju(n@mYwH|{jh#=v!HLiP(Hsdwkj9l>nD-pEZE%1D78Q@ zI!>K5y06@|Q1DR?BYn@IO{q5OR(HHo)^-u)Ezlox`S0;uyS+@UT&H?i?kVY`hhHNk z)$Qs~t!Sc!jEbdLnZkVBO7jnDyNG+nXVcoIiZHJG1J8O@*(b^mm?X4Y^}RRM`Umbg zZ6|%-WjyOKS7MwD&bE0m^X)B`u6chaX?mMFGSAIN#r)Pu=*%-`*nqy4^8AzXIloL3 zsEDHERCINoVS9m3%1$&!HZE2hb{QOMO z(9xrYLJ{Fn5YodI6h zl9_o842r2DjhsrKXXF(7)id?ZQ}i#ClzI!NTxARwc?Z}S5bN{hs#`eKyQ!ww-K&Fe zTwGjZC3}DT!q-sXIRKUvV*L9XrLWG(&3{`-j9=Hkl-!++U_c zuBN=oHaIosN{&M2^BG)Sf8Bed`mA=ApQkKWr5O_=k6JmWk;Av-r$438bw+!1E|1hD! zT3Re8*RM*Q;llW=+q7bXZt^&3)Uv;<)iWGehCV03f(ZvsDk_j#*}hF-KEGNOkzam}&o~kB($cL>0H4z% z3eS7ChzY5CIhEh$i{)g%{}jA8e^CGTvt?A)BMN{eD-X$BU;vFE9_~-=9B3P4O@ybg z3nJCtHD)5_2q<)`1G-CC|!zkCrU z;Ia?=aBJFNK99Gsz+Il&(xMI%eUahZ4A}$eNSyu%&Fy@wZIu0$@h`E@W!BSc!x3vu zc+*$_97?}yRz88n^vz9#@A)9H*U*ZaLdx^pY*~@c)!p%^w2IX_dxJ((LxV5`X7^P^ zA&=|OkHW3S+S0Y?O7Nl&g^i2tq@3sDuvQAwqueK4E}|q=q<0XPyDdtm>${4!$1BFC zt~+j`di(Ue@SXST0d<0yv0e-G^{LTS$Rg|$Zv35B^Pf4i=!=W=S^bo=)3Ajzgqe)T zYm017Rixfu8y(72{}rrCro=V{ZuHQsQCE!0fWTe@UQNCAG8n^eABv{E17vaqJH4*O9>24(uug=v9~{O z!`a%~SG~gg^iaj~3xF8BnkX)xZ%b~tJA0|w3WKvqEGP^D>yu4Q393zSBPO{{Tw?|( z9$}R_93khi(L<`qK{NQ|(9%rTk5?E4fW`jwCF*i&pVN*x^d*;5>5w_4&Bju#DdPL##i6ixUI?<&)Wmzb zJ3;aAc*ZMN7L7X@>imK+ zjaU+Wd210s9)d!h^Zff1c)h%-cyF45hk~n_@n`PWM(2R#$x|+e(t@M!5r+w%1zlkD zFH&cVFUMXEG;9{7sfGT;5S(N-Z*))RL!@|)VzlT~q|y%F-(9ITy2&!Gur0V)Ru{bS zdpUd*4v^cOE>7-l6SL41ntwN~b!@L=VZA($nEdMczQiiMr7W#-dG3JAi4_;N>ao-} z+5d?W$>+GS^A-uoaql-|q*&o|UQdZ*A~%OeR?$S|ZL#?3H0r-90$vZg&e zekO?QY$C2>i5v}T9i}$5y<8f_ahFx&eK|C&sMeWz@&Ql>dFPIZS)CyrGT02V- zoow18Ya@;BrV$r<-vgsAt*KVQOrz@ti85I1SbHLb&B^-JJF8)1!}rh_H7B>-(q6B- z)}W!Gp>Bzbt_X&8a5=cO&XxwXxv`p&+vU8si?|B)5TV8EDoCs%3?Ej2#?q9ZSz`WB zRz`*n!m+ugvuf_D;~b1XRVs~byql4j7?dj#Qo7Q1viKfpF(@k-L&S&C&TsdE^Mr&0 zPYh`iZ}6d&*}%D#aQCGZ)$?)$^1>QqQlb)NqLyhgxmQcyE&Ey#a7};ZpaY8>*|~W!J)#(u(2s z&7iO^)-UmcDt`5;k9u`39Zl12K8AA9z`0n=G0$ZC59&0>Mu46BuP1Hyjp$%MrF=ix z!y1}sFf>RyPtDYgm5umg7#h7Egw@&oiozd?MX!}4DHx&Wzuaci5%z#%@rjt7sENH3 z9!1})-7={wK4`pM`3o9CNN}}CGSN7I1yOouXUhk|ES=g`$}KK)*yCJALL%XG3h;Pm6R?yyBXI!58B9~qBml~4?lb3{wX z5yZ?KBCx27@h%tWEF$Bwfr*WA2_iF8qEI!C%_3BZ2MgMCdyrPVi}`w zW~pyz?K&e|>OgyI3p1jrp+()cZ?sU7h}RSlGDGCn`=v1F(R;jksm%ptZJlopQio3e zLY6~byvrL36;&|jd6{L*`h3frQsrS)r1$4=Xs^a zh?dfY=NE`QarL@xev_1s75&x2N#w#hQ{P%gQKcwGA0QaHRAcS%d9AKs^CEdi=Vs1~ z3=sri$1>5?JRof)}xF~#1JXFJJs06SVbt1Wlcm#({P%2A9uFeMDm3R>&Pr= zh;(coFp4;Gf-$L#o{jeUfM0Xr9KE=*5Uv?0Rw2dPD~nxWb*RB zbXnrF#fxS=R=K)B#IHp#nt2c24qy2>mvK$gcAIpzg7h%}a=*hgX*s!6HlHPR-F%+N z;tn3nc0(K2dRizu&w}YclY4RJh!ez?r*p-vz4~}*b{Y+d6%U@Gp%W$J%Lzk0o zVibX!RF0}YmoYo^Ycf%8p~!V;6k**`R)73s`=5Qv{{*#*U^qEWc5ze`Am5d7^VsA^DWLVDpCocxLM;4=!S?7OnQw z8^Zdo4qT!?o=Y*vZch}PsSt3p^adltf85*bcCe<)NT9Qg1mIVDpJcVmn&aoV!M*=F zIPGb0k_Xp+BH~d26#A})&Aw0`b6hfDV?5Xxfr8d$^#fS=+TskiBw1Bm 
zf;jpgEr2Dn5;YM$12IJ2yZxaa}0{%N88*wnxKofy^f|7GL-5) z-k`U~Hj6nMS&sefxFoYr77@ZJsper8Y*E=wURM8&XQHyJgG_*?Lr_iX!;3yQkxkk- zt;05pcc}Ed5r;IdaEVsCn5|&y??+qNv@Z!f_Hvn0ZTr^)65)!9U}E6YNy(dit%JW3-oc808`K||F=_(pMuT$yXCiX8Sx28 zrQ?>L4&#Bdkrt2_-0oHH9X+reYDq_M676h_C2NAc8oO9>Yqf;-;Qsi`S!cs>?rnr@ zc}5r;mlZBd!$xa9Q8m7Gcv|M`Zj4a2wnk7e#m>XP{TXtb)?Bq=k-fR*_rXQSCpaq8 zwVOlU+xIkfzXS$tVoUR&Dt>#DBFe-0M)Y8$8lY`yrKue>Xg3v%|M3Hp!qXO%b88?5mADoSfgl{ZD&?uK^c|gPu%tUE|^m4A|0*vgpeilQL+!`%J-+Wb~-V^r9s+ zE^G+7iO;o}y4hRw`w?L((>CD(2#a!nN3%f_DD$*2(Uq9FzNFd|km9IqZKw3f*A%V3 zpp*~#{mt;jYY7W2j8glBJs5M)89&0)(IPV{>%~xCb!cTvT>Y*&;OV)8eAEZ-SLcqB z%;lM!hoePmP;Ms6$+P20V^_i_@`^P@c8#flx({c(8qvjGl<2r8x3QY0#=w~cor&nH zqYW7jDK2Ju0JS$@1e}`dNObzJ=(e9Gf~}-Yi@G75z2mN?cv}#)iYJ`)q*Q3 z6D2d#2rOxn)7q8^q8%Sr1z1yJy^DNZ4s0-!j`sIXIF9 zIiduccVO5gYWx3Q4|H!EZk->+T+tc*Hs3gzOhfvOL!n zELx4X7D3xk&v`EQ6E*rs=IaHevA1N1bHsk@kB@G)2$`RD*M{7=DnIO6yAGdnX@4EG z)<7mfy$e!pIviRN>UD5%<-e-XU5r%r)cHZA) zITVdBR82aQ3_)tetmCc0lX|tzmBsIu z0NoR;s7u)nZe-F6WR!8x*nPB2A`^QH@S-S(m3M?mI`me5$im{AluBCR!pZm*2|Gny z70fkN{HHO)?UIpGBssQjs(BcqPuY`=TQ?gY2N#6AE*^F7$d81Y5z*ZD=1eXKN4TWfZX-;B>GiQpX(T$*kS|%6f z7X*LvJxz)OoPOv$H{DiJmTBcE%=|VJrjPjWF|7Tp?+zl`Wz;$;pCxkA!_W`L?e5`p z5XQKdm^9KSkN(n;w=5r&Gg~+=I?9}8RxvPVez+;-YkDypG;ODD&$x~qT>UKXOBcqi z#KqCsB=P7J1ha3=XC1}Z%ti7w+<1|b0Uj~=cUGrW-Q{J<7uV0{3#RA~?a>FcTaRzk zlDCxMVR9_}FE-lqPCh)ekgCx82L{HTcY;*WN7?+SpzP#nUoSJkW{3a_CJ;bK>sj|> zdXNE2&>F4s|4`L*sT*guB0RP~W_{5wEW9OCWuDA|2tgBZZmh{pVrB*Gt)CZ@6`G~G z<>kz~hd*IV)hzz({UIkKgK_g@<=8fzn;*x^-S(IYMr`1aZ$pu*Xw}j#r5#z@T<{J8 zrE~Q57SLnkQLrjgjn+1N3n)(Jli`X8kCqZMH%}UYXJeKXe+cS;9!XVy>Y;Ic4ExqJm9e1hANNjk@oBhw%e*6&}MJO)4BOuJ=Y7l}Bu8CD3~)+B5g`LkbL-=;O0S7{@i1 z?TdeS(b0ly*n_NeY^0<_%+?+fOe+{oG6x|-P8Bhso*~HfH8bHVl`c~d17T~l92CHL2w zw9zjatY(tx#Apb#!VQe>NK|W$m8>!hgndF<3q6wy5VYyw>U#0a*Z_KJ!^fsS#;Xf~2FrA0EH=4%ph3g{ch* zbYqR&aMZXff{j{Zzi}V_y-T?lCrbK-Ep0K0mdn!Q%zx%P$3ZG=r}CXd-XC@|p{%MV zZ->nZbiZit09U?;$m`1Ik(pQ)p<9?P-Xwvncfo%3-l2Q{2w^S5&^p0gd2c;>&H@iF z_n&eLj%CF@s{hf+1!(Hz;Ck#!& z{F@9L=$?-^!40qbz{e#QGpROV2@g%smW~|52lrKTU%f+B@opPRp(_p%QAX}K#xW?!s13)X-Z*94 zzT7Kv?SU!-_no%(aK31{{CPwM z{uf==X~BXs^Bx5U70_*0cv{VceG0;B1a~DF`LDy)cx{~vy3+~b)jMODAZ&ymcH7bo z!VyC{W@7QJaVjbKM7TNb{n(QV*&HS-r>cbO_*7$@;yAVKgteLnzdkJ%%^dO(8f%J~ zpL-xxl&M2fRB{fNfT;L2!^sO4m^w(8>|DC(l6*`FnO$){-W(HqrKzMQnOFZy#qfb3 zOO@pF zy0Y=vLmJSnYmBx1>n{Qt(5=5%YCV-70S)LH*e62+x`qXG<>L{-m2QUiZSIAWeR~)go5Ie{2yOhW_U$!Yw6>Kd5_%(2Yb7E1G;IjL6}iX95d;H5H6&iHumSgThP?W zQXRRxCX>zO?H$pQDqYz^GKl8~r{FjHuAdTPcBgncGm;^Qg{!)wU@cfy)u-<`ov;mjC|7`OGxD z(`^vJr-uTQ8FU4B{q!NkQmOlV$KG(a5C!U`CSU*Rq|{WjU3K@b%jc{*K)3iaxT0g} z&$S|;JHW*Q?=f~}7d=Epx%#Dxh1P{$;_mrioE?xgW*% zJr^E<6nAckEB2XIWN-uK1J({^->P?f-t6QfM z9oIL|O@Tc^jaxH$tSip(Jwz7G&l`Djxw_(ld!oYqzLS%BUy#pmINud!o=w4dzH~wE$<)IX);be+}s8;_zog{i8}XQDts%lY?dQ>^Swj={!tgWE0Ff9ZT9%^4`(Wt z(v%uX$|%Trh;{PG3+BppxH|6(dPn!FfUdm0R%K&c)w_Z2yU4fFDjx2YZ6{p_(7lmB zw-;?ampvS%ZkO~d7SwZAH6xt1z&q2U(Z5|;*P4p1qq^r-ozKSAv5f|Fua5hCd0b^K z?UoN7Iga<2f2B**<^$S;$;^ZL_QZ(6{nXsIc&ksKD-bss4hS~(Q^`c39FNX$J{)1> zh1?1fn?*z6Z)J*QObQa)WdWVX({r&PI2G}n0Qayp<9L(@sij+;Cbf7U;&?QLiAmDe z2`YP^JS&#Xm zvsE&Y0NwjP^~4a4M{nF6qb?C7I^7L7X>ZnUYCGTRQc{XV}aSL+|i|_BsE8? z5(09pBsx!sjkCZ^Tg$T7BQ6xXPg|TuZ3V=TWzfrqHc^tS;XzHK2R>-n+}? 
z%r&5EV0TIbx`qdIV+nL615RsN1R6U6!Dmk3qt_O~-f|#1w7n1JW)7^QYXsjm#;lXe z1+uV0gr8##FiuFkyp7Syk5)YtK%6?Bj9>q{4;{MuqyNy65Zh#ZJFn0yeIg zRIV(eI(O$OYYgj@jxOBp7OHimo~}#+_nx&>HH&0&?Mz!N$-0*Q^nK|k#xkjnLO4X(9NM;eNadkpD)$SglK@EIyu?H%e`3%h!;&@J7MPr zw58hfGpb4LiY3Z~q^Bsb>u={+qyi00OUCh(L>tj5JDQnO{o4$_RI=`ji$XAeE;V2WxVWRIqZ^ZZbdCw~v-OdoRJ2+nDJKUZ ztm`Xqxed>4u!|?_*6Ox9otgx(Qy$gK8_rb9%6Cl8^S8)zYTND(EO5}b`Ae*RjtEi8V+Y1{MyvMdPxY~=b)}CpyrvVWygz9vaN-!*)xjA( z7~|q~kfkrVcG*@Sv@g%27q=;X{c;T#BBPO*l+pm7L|vER5s}z`=r~RX1($Kg5QVNc zp{~81jZ*Ijh|J8&f{naOR40?5!Or^f=~x!TckVe*uIq>;oj|0f?oAWN6kSk#0bLpO zP8qewZzdBFZsemtZaS08s%r9izk$l!1+8=o`|_y#A~GGHQk7eqCSzW6bEM49>F}-DB7;ea4hXDn`D;L{)`(+iCY>>s_ z@OV?~53<7fI1@VQ0Il3|mHUV+$jk9%QTb@}BYuo_jI-BFXw(B*b?DXhsldfd#`NtBM5e2OvKiD?QT0*vi`8dPGTr9V#7gOrG}o+ zI}MwU(n*NNy}5sCkwQz(2f?uV1o0)kW}nWu6jH&MZSW&;$n4O{(AH(I8fz>Kp^V&6INLKy(?O{ z=CbyBHrDUA<9ss>&)gWVo`n?wU3I-23nVG&YdS~WMD^^W*CtW1YlY{&byMTQ$M;uX zg(`im*=eWjmT#kq_YwA`Z(_wPjuld~*V{#Bxh%@vJ*>4G5@V)po?Y$p6oB?`u8UPB z_mSU6&{Dc3$Fk#u(%HFOL*jJ~>zJY3v!rJI?+0zLe4~RJckUjZj1JttH|?EetlSvD zI?l9=mcPd&Q>egt!$Ue`!jDR2nM2E*J1pKzjdLw{j6S?(*OOE0bg+<=UpwB8y;GS?K^)$z=_W>{jr%f2D%x{ zpndw+D5NtpV%2S)@GPa!6${9-ROrg$bwdH&U;o;S^o+6-!A|Yls_bM@BUxL@^G7mX z>s~|mSCdKj$n;U9qGZ-{Ri8Y8xszNiVR=yodjF{;1f=CwTajB{|MS@A^%wj6Atnzk$)8jo>;CUuKs!ExHKl%fgR-5+?UI=#ytj8+rcH`hC} zKQnkZ<|-?zm+pvzJtvtvPg!C$7l!3nX8bB;a01wdlfg&ECZYqOt|))D9J9vv|Jrea zYr_74)J!8~12tt}3MR4skp<5~YYiKLih!=b+;toNL0}UDT}OLc^zYLhy}NbcGg+6g zagy-w&b^y3!J`Ehbm&zCX3LAzh0JukefEd~lA`R*B=q#Ao$zfwei-B4g3y->NO6%^ zdUC&ll<};2o=(u3$v>S5RQ(7J|Bcrz0)=PLa?Rd}b!PoA$*YyxH$Eo|ckJD$AZ-@+ zQHm~tPx<%smj0OF*;0YY?+HCW3pt_GuN~~16!?^yzti?@!X(dDc*wUS_9n#O^)rWY zHZ>Vt2sH(sF6z)nsX_#{Zio!y5?KN3-Lh6WpSRr|TrsCzHw8(hts^v*?GkuScKn&t z!A&Cw4kKjV&KkPKEtg9JUAd0;0t2w_LI@U7-F%amuL6?avcF#=Lsd+&89TX_&OEoL z{d=-MU4qisoxhRJ+_%dw~Wv_XHuak)Ql zoIZ?*taRo4kX4YU>g>*-9rR#V5B0NijQiVX4=25{@y2&}~!XWy|rc)F*U&o6|Ou3>fc9)Ob zbGtf+`UJXbOm4%6rjOO=+C&lcvY$PzEOp-nc}2MSXhO9Wx#b;9`h3yX9%GrPCS&;O zs0{V~QQc<3r^F1tdrZ$?Roz|Gc5SxZNMu3%Db`y&OGxWzBhN!!4csm*b1`yr)cg+Y3_0(uRp@0$r1-${I`P`ofg>60xZa=*mL<3j_>eZ-yMzi-nlBi;Qrf0 zUaJ`B%6I+T`AB7fo0eX?go!IJUfLBkWCUa^ijIxL0jhLMJENGHnW0CQ4j9tE7rJ!x zS4mv;1#~5u=o^!%SY>)ojV{lLNzIFV?J$JS5Yl3}j&SeE#A+*Y%R89#`Fv|T40AMB zfOsV>n^*Bqb(>vbDrV$=fm`#}R%;glldr6E#HrmjQIO%i@2S!wpfL{(RK)fNZrIBA9COr2Ai zBsdA|eSmd%C6TDQKv%}QzYg0l>Bt_*95W@+S~LT_{jzZ{-wAvOu;m`fwHyexLUU(c z3lo?M2y{gS{mShzu(fK)H*gK;R_lv&nQx>4-OKz2UIwSH0bK*TNgB{KJfIs-29(Wp zu7(`a4eVp6osNM-Ko4|Vc|3r2ZAYLjZI&xj=IUXtys&QEc{@B?IHFVc-X$!T_iW!p zWp5~f?w4?K@~KjpD==47z^4O$z^R~>wF0`~Nx69CpBUIt3H1m#C$TaC7Td&p0syDb$uQ21toEsXXL2i6nA{7o zU9L%cwT*+CY?^ZdBzDWwPb4C?jurtUxtJ=d8&UoK$NF~*f>Kz2vieHsDFxp(3D9ln zmV>^`<`osLENDg~nlaydDs*Gj!s2V3EdR34jx~W&m8F#|sD8b}K>^gaXkR6Z ziifFwnM0eZ>-va=bCLqB(yn{e3z^I!{(erR4($HpplbBq)IWuj$}9||3MGvbLV?JV zseC&#n>|>2?i?3yzhb8I0RIdwSSF~2;JK^aI03cB{p?>RyaIGXV$9K<3Yr1z$BfDq ztu<@}DgwFzCr&Gyx$Nw!Gw~bt451Vdi=TR&7WcYEs>4*O-g{uHQnAi%+m+w@il2>d z5Rm>a>iwYmU@A*(@K1C&o<8~?A*_YkPX51}5chZ54-3>3 zxY^s;O##K_gtA`T=c~RWVQpecD4T(CJAWt0;d7^Dd$hNAL>!^;@{p6NpUvvv98_Xq`mHoa5J&85pfq2ZX3stJFRKvRA6zkhQ zdE{S$XdB$r+*bkY?Qszbv@Yz>TLEFIje8$sBLA1#xNa6!I7RUN1w_>TPdgHDb+H__s6|<-fBx7pMO((B>H=MP)`?B>td;IeE(V|7 zshEFrLQ&bC%7Cu?E`W1asRJ+N0p8?%RxI=1`qhaE#5M|s%3M>8X?%9eoV9a{>~h_p z5(zWk-{6E@98)C8W-#Gsc}>=i0;A0F{2EuK5SH;k=C`82{p+x;DqpaG=P#gl5$&W8 z(Yc{J)vm)iwh5U1oz4t$%^%zw&9NzuZd#UvrOq5vSqu5bI5h^hVuF&484rEtp~jlq z2B)AI&tK-?GM+?m?0B8~2y7kKGhL0B;vjK?HH9DJcyS}g!|DKCiJv%GNCK0LiBotj zlHe`LNq-XZit1S`rbQtv*K(LiNut~x&aqD3ALP0vVM>(L4c9cF+i3HibnMJpd6n2K>tSwAP9Z|V zeylanJsX>Yx3m4v{Gu85oVB8AH%Uzj 
zUD%&MW$CwJCxmtRWL#PMRs?iUoC(6)pR6@Nx=)?h};& zT>*dNc5NV}G@~{1Ff?OLU3pfCV)rXTUICY4i@Yi3yaKv6Hg8SkX%7WVPZOd}-SZEj ztr@=UH%h5`1=>D#XophLt{*gxHp{lG*Q*1%egvSO_8g|vsIuMsV|(!jf$&#-M$$sq z1*^kOR|#}Ysi-ZlhVCi?=(o=t!BhU-S=-i20iTG>3>5>hIhH!Y0km_Le~QUS?gszZ`&13i7i2=R4 z8vxJ+fv!^#(A}4ki-%9uZ2?_oO?~an@G<+9Mbf?O^N||#sT8{TOeCzKLRUUj@-83? z(VM7B3L?mqdVW!~h?JiTSd|6d9(4IA4Rm#iT~BUrhOcK`pt6^cmG2%aH`_7k%ee^X zCQzj`?j>)I|9N3#{zW4g<&_7n-<<0OF&$$pZ5?9KTVgdUQ`K7qEvN& zu8en*yz~~Gw?W@2a?TeNgCGgzqm;^Kt} z3JOGvmaVA#xm4TK)zuBRPPq+Egqjt#qelbq%eo&i_4Yf_+Sd=OR(ye@0bFo>WU zyqe@2^1`C$5fU7PmaST|Rx9gPCMK{pY&yF4=z%G>-2q2O$4bv@*UlYSx#Dv?yWn~F z@|uqE8rSg}r%k(C^?A7f-N^Ij@Ydp|FtFFd7&Le&V)(*YUd`OnFjs<85zzhl+m-lx z{aV~Tbp<+f^r^6BZgH3^{po@3%;y%Nedq2lxu|HZ&IcqhBOmLIrohj|6xVmMfeq`r zHg?kkUEh}8s$Q;G9ZEgGvWsE$W4ZhtvyoSD5?f=1Zd=D9g{}g+XAe3E6m=8ObR%sL4B};QJ;4y8f)yCN@hJRJZ=liJ4^Rign^0T!@qjV04lR?s(4& zezfg-$?(9q=`TfG>9HQ)JD&kpTCp zfZceueGjGZlsdBlbf2ev^L4EGDz?G7gt7xR|D@`?uBNJ1Uo-bTs%%$KZToHC(W;JZ zTu!C}y4QO6D0^h{i-|4*Lj@ea({&Jr5yF012cY}PsYCcC{EULP_EfBjrLQzm5DS$3 zzW*3(jg7#(qq{M$T@Ttxw<)qAmh1e3HFWh#SyAwg*tVW9+6H&i5?Ye(4kyLaYB?C! zQ|Y^~gwG}oAmGRNT%)Z#-Wo~&(@^YKvyiMNALJmsfl}1 zjjX?}P+CME<2`KJJ3q{y z{gC=meOWH+6uS3dmGSf%oxf7(b|?jOPbSo;kJ1~aP<{Y zMGD=5)wMp4RMv70qlL1liew>J>cMU%#}6BwW2C%DEp3 zk;QdS1$3XiF+te|OG25movyy1?jGtCPUjQ2Xnb2N6VNg->6PY$sm9p;VpzHADXBRt zR+v)DHa^u6~U0+bNDW<;DoQmUgyna`l zx@G~z4=;CDt|Vexx#E#1)?L&+Pb8j8e{{yC04jlLyDZ6W(?9Y=UoOtSJ0qstnAG}a z8`O@KD0Eo{;_J1)V(*~<&2rhG?%CCGGNpm8T(v1FqqtR#95%4r1S9o+kbQw}RX5it zNJ^nAK-c(|8ttbPx}||GlacGvq?FNU7fUQ=8GsB5HrBEP(feW5uic>$iE9Isp-q*VrV<=HG_&de{|>5!04tL8Y}Gnjv5j0ofS zqpv?Jb6lB&O8xea?~h`cfRa%Xi~bMjv>*;w&)pn{VWsM>>$jFJ{YBvl?8%^(Q0}5Ly@d)o3F;GziHL{_OC9OJ|c;KQYrH|yc zq+Y$$iC_JAq#B>(TFx_Z=_4LfN;WlK(|~T{#k%Q^qygQgi~ZGdWDV#V*qzpZu3-V) z)%%n2{ozzx*V!J|w6*2SSmT4J>U=~6=q4s5;?~=4hl{gI(JmI2mhklSgw#i=s3j#O zQNbFD)~)?uYio-YpMQpcBS-M``~?KMI`vy)x5Wz=AT=cgQ>GH$Iytc>Y$A5<*p2{# z=6Um;g@1<*CF;VKKEItiwh>IPzBXlW zhM&(!bo9T4wQp^ZpH~PkZ)3FbGa}?>Eviz=T)C{YjPv++|7?t!GzQn+cpI#3E~!L! zZT=JAt^5S{O@9{-c5QGjnDw|=bJxYq2(8-~!`hnl!!Ft^i!xW=P6d9Ba}n$D{G0E? z)4N5DW0;Vh2M=4;SfM(%Db|FSfAu{C?DXp?K&4%U--&ut=qjLlPmLA2Pth9r23kVN z#50yE+G)q)X}MfC2sOdg3UgUU*PgU-*@blcFEU+CW~JRZAIN8c7A%GD|S(0aVU$L zGTEN^a&xtqE0)U@DRjm9@YQuLO8qN#R%+;mDv-2E*-_5)K z!axGf*SicTg00e@sB(qyi9N6Y!|^*eP}S+f`mQ?Fs|!g`B}%_RMG}u2#rt}QfA4DVqEyy4RQBpyR7@OFQqvj?=$;7_B%H zS=@DWu%{Ix>zFYX85vo8f3hrfW!3rQU!B{CTgrxwTgz2ho12+3xuZKCntoeRaOxH4 z8WZT6Os`SkrQYpQKL?EAg158_+4;Ecux=5#woTskZgscDQ!VY3ZRT5CJpYb$bgSD$ zp*t)8ecW8|Q+0Q#bUPXMPhH^R@cUlMBKWW8!ug#nX}Np}=&~j`A*?h3x<_b#`2gz| zUIMzNnEa+U1e%WQTCyggBye3zVBD%%9>&rtxq!#mu~gSr@h1HqW^&lee8-S?5m}Iz zWH7O>l-hryBAzqp64140J-^@P>)>idt#}RHZY{IaBDD-8GVPIt_~K0ubl)e?T}0(p zadnpf-D{V6t8x*WiUD0wI8Axmo3J{afHqcF6PG0^o>n&cnt@0YW5mB)?%V%&plWDZ3MWCT0ASHw@L`GxR z-h--!u6ib1Jg7MdV1JHBgZuSFyEd)swT7+&x*U&cs?b&SZV6t8v8KC>9mmsianJF( zB^cRokeljZjfcH$k<6s#cf-=KKDPSbKL;e^vHXuPzTl54ZJ5LvzX65d6riObVV)uH^%_6s~^`hl_0kKYnL%Zl7Sv4_*GW=eBQU@xKq4I zf{@gu{dtca9-(cqxLoO*JfFp4-%i}8n0O?m1*CqiC}by4AuCN1c3xolf?&>X#j0Nt zs`U_b^- z(iO0Q26V50n`q#dpaES2yFnVzH7uaJ_DCweJivO2Ev#`gf+wr+tH31Vz)EHX$Z+xq*XYu9dg_2n0pT4d%EPr}Wund*1Xp56HT zvt^hvWhw>_9fr)zOr^Y)?4%adj~zRT zpuj*LH;q=t&3OI&35iXyWBWF=YTX)sgufE26oTb{ckM*GcK%dAU#hiQo%>QB+kEoI zVuS}qp@ZL(u(xXqZy(c2VXhqWOvu*=Jx^8TtS8a6cYj`o&fZg6cxVtleD!(w`;J9( z_gj#iVN)f{&CW^0{(!}>b;`ob1uw$M#jVDdMv*9$xl&WD#=F+b4&kgvXu^eQcLEg? 
zz28weJ=Yh|J!RA$8;r-}v{5^RpcPRlEmEs-C6l@M`IwI_>tgC)!oyU^9HdR5)Q+re zTYPbmR_vypIGu?9l5>zNv%pH46B5y`@GSfbPh)7|PPFCUP0=`+tK|XRv@9b$$vQ*< zw4hqV8bD(4B$mrJzTTX+f`wT2U^Kiq*^@=fcUWU+?QVNKb$z_DY`VajJokU%p%xNF z$t!hxF2tMQ?hnO*fLYoXfv!wsrCyxaOo_5q`rS|K!?|C&({3=EwS(dnG?tHW6k)O% zhXYaP%HnNAf{C(+7E45#JV%H^=Le}nWQ6t4oM$$0V@?ul`$@UH_p1!(8fp@cSoPjY z+usZVX90R0>}UahQB^DTa;4Voy|m{Q&^L;fz5)!F(Q;W-ozgxGI;v{izSVUgEsUEN zSuWQF=(e%7$LEB#b{sck;cwybefT*dMAgg{73r^$=dkF+UP9!am`tl^Q2}qFGI&nF zze-UnHFO1r{z#?l64sx6k2cCfU3K=#D_B?e-LpqAkCw|5JX;pQS^=>>*0!uOYe5_6 zXgqjuo3ch;!1}h*R|eyB_rA?Ik&;NuxnSsM!w{+zXJ z-wQmd?2xZ@Z=udbAnc9*{;6u=iY2lD?2+5otGKQw_?4K;b9XX<`&rsn>#d&U_X2dA zTif94KDzznJ`UaXi>ihDanB*@I;5rNmSx5I5fP(xYVADxm~Cr_h{y-)T0jey7xM*)jR_74m*A3 zvt?WXM5%x6h#{CTW_XEC>kH@>a)b&n>WcNo6A@z6TD^bd;LdlC!jKx> z0KMI^EDR5!N>;{ZY5Jc}|Iky7<(n3TtD1ksYv>ACo&1IuVp2@;(^D6eWw9*0i*o6m z-&~4WE*oRaQXhB_=zc=glGNap=Z`E9iz2F{caE}Zp7ORQYwMaRHxF;t1(b6TD@r*h zSr}iwk(TdVY`=$#;PT#K%YLNx;1Vjk>I!swvHZibhXv@C-->`PYy2)I&>efxZc?@u zCDR@6da5{l`$U9NO9}iI2NZd?nasubc{d~=gYP(R(AIJT>nD!roPn*!tT3AIXY={K zBsGMkNy6Lxv{@D#Ygg8M%;CG+*R;FTC*jrT=3X`%R}6IJeZ)}TNiLi4YA0Hw2uM=n z!6V1<{_?MifUZ3AC24EufZl|;z2N1cTW`@lD=)cK$yiofg>DYaaLA}9emA~WjYnbd zK`wt*RkP(Bcsj?Ud-z9MEFT&VoJeGC_u7v~E}XQzM7OFwT#%4N>BnjRe>S7?@km@E znjuDwN1X~!U@+&yEegXcJ$-%sR;ev6U{~taw&1gW39Ww5MzSvM*aX}dL^589&GXC^^4Tz= zO>u-uKB^wj{XctW0Uy=XwDD)-ZiJvgf_rh=LQAo>l$N?scX_`WP}))`Ev4@6?(Rxy zDHL}t?k*wj$$m2@1VRKhvb%S8pY#6mh9>vkGv}GJx%Zy`nHhhK`Jk@66Z_+nJGRUH z046Jhrd+NF=q5}0V@(Y`drhd++}&?#-J9bjQuz+)zl&^mdR5-_HPgmeDSNbEN)&t` z_9z@z8%N0fs(s^Jym|9(bN~AFV9Mpevf=7B0mr)jr0WI>s3YVYt@V!3)2svvDDnSwofeLrItr+w>(Mr_s`#d zNhC%ZPCjLTiLkBei=F~qg{{|Id%eKPV%VnF(A2~k8JT$Pm6s%%GsC2L+__^X>Pyt_ zjW^#S(Yp052l5jsUtjIXU?Kw_otxH{aljtFAV%_@^I# zz}z{r1sVsKF(ycD+)FRNLL!ZmFl*+&M?~lv%y!0e5#L6|VqJczYUUTbRsFP6ti1}0?i^w0o{w$U)SdJEruRy1c zC!uAVuu3Cyi;D8FVaqqzu=O*k$9pkO9&#o^Bmqt`)To5rLAPFcxuAKSF}BBn7h?r7H`A<35eY-%@Yq?4If9CTR~-GX3%Kv?u}AHmNzZ z$3*Yu%FE}a&oz`3$&#YEU#eM{oU=9rzb%fCXx+j?KzEw#E*}3`UG$JtfM-Z8qP~*u zIa3xZZt(-_)p zc6@$k@eXD-=PKZdKvxy{R8j#yv3jwI($(nB7y+qkGE(v7w)G~R@N)u06=2>csd_bf zRnrR}Ct!8Iq+xzb0I5cUzSy;|sf{~%_g0f~xh6nYvBb?u9nc{v4!d%)@Y;qI0!3wq zf6oC@yEhc;#kLD)|A97=ruot)tqgGeQsC_z0kM}gX?+Oj&Xh>hTcl3!@P^H#9&Qu# zj7`9Q=?CyX`B?)Q*NDVc27vC!$rPZu6N4q9c(A0D-Y<~zig{DaUz{78w?~7Ja4b(t zHg+gDo!22X7~OE*lOD2n-)zoX`R(Z{hjuy+K*~=oQVup z_@t4+8U=idjK5=aoYd@16%f2s;FkTk^XVl303ZNKL_t(gzuJe^B;J;tfpHSad|Y(A z=n^8ehUefn(f`V3ZOmA;hVE?9WUqkZac#RBK(5zPzp1YqgoMf8^?U*HE3qs!8Q%%e zZ4wsg^a$8(J9gv4FTTTy)$0yLkNV5AVecMYq^^8#6Pc?KJ8U&mpd8SZ`PH&{6I^@Q z`DoIxLAi_97SJuZ@K#wkFM0l01KC(+o8z=FSLFHO9C_XuB0x1o;OF;qBQRU8>rSAnp!=OEQFvQYR%*1d>Y%Bk zzn&c_i{}9*D~hI?{#7!fJR#uWxMn%HRqFk|J~`Sf$lp5jfap>j38vTjWO}4|Cekb? 
z=Ljs&wAJ3-2z0&Mzn(R^(m+=&*Y%)A4eDWV{}a)xYbQiT6ob=}R$D+6GxL3lyxx$Eoo zEUC#mMoJo0bU)Jf-7sVxydR0Raz9Fv`%zQ-7D3U+z8`5l*%zc1?C-~U2Q3g*Uj^6~H``{Z3TA1kHK`!%mLKy%p~e4(V+*G)ir573m#3SqU*yKW|q z3@wrf-$?u-8)vj$Zc@E`lQvl2c{K~w8AH`g~5-18wah+J4|Y* zx0DPD`uBXHq#xEAwdWt7W-?fWA) zSxr-Xv7}jUyVxEY!zDTUG?}|HVG-n zzDeq`UnuvKedfAx#;%)BJs>HmvrWx+-83Ybx-n{aGP=oTCe0ji@5gn`rfa>I>Zb2q zsR39a8YuL>W zV+Pbk!|2*yv>*r_uX%74H8=Ia7GN1<)ob_VX2PPzQXYp+O6 z-K98f=ui`#JMEvT28iB%$0!5^2jR)b9>MV^_QP4j&&K}!d-23$kK)wRh8pPm>dP;f zh*=$1fGnVX`}J3d5Ib(T@n(U$lICRGSlHyHV(1xX3Q!C)pt)JI=6Lv_2e5nBE(2IK zU2sNvx};lPY>J0y6tXUqz4YR9CL*_Iuihr5^4DK|j=p_Q#yJ9vmn>d*1kly-Yjp4E zyY4noz;C|s8kWnLh7KEM#j`(BfR zx~j0frhhLq?XuRro%Q#0^yqp9+PCY6^pqw@lL$dgl~dC4^D_nJ&c~*$-y zu(n$-1PWX)+cvFVZPG69+n$GheJ??`uDy}GHx%2p*ag@hei2B^*npjT{>9eqzoKK0 zb~yL4Yf)G9()5c}ZIbm@A|6c!QA@Yl1iGaMS{m-Iuyw#cwvz;tmiJAVd@l3_wwKhM z3zP+5-9m{|+#r4C@g6fl-((v7$EaX%WoqM8v7P{D%qJ=J` z3dF60WC2C)fslVK3o|dhy6AVqpu<2{BVR_omtbBh*EVm5 zzI7TH*tkky=yOu{R$mV53k*CUSvj=k?llsHI-ox30iRP4gTP5M=7=D$LYAE|B!crY5i0 z_=wb`oh)!UPVVoTxKC3+uN3<(7g###V1%xwGQMNU3~ZBh%t;dUt9|PZ`M0FspCroH zM286E)Nw@1_v;1T_KO$LCC7XsX_?=XRL_AjCWYCVh507hb$hnJ-&4+!4KKkG?V5}J zQbYGdIp%!`m^*KDt{o<-u=>VCxNzFt2ZGe z)ZCjCLcg+p1vUs2R=AoWed>4>-YQ7ew9wi<+Pucp5>}1=*P^3DM>jNnd3x<)tjkC< zuvT^3CvaNFG_q9}IW`cl3zYtKS8?i9t+%UVQfNM?USnfxNh)opeFVj(H$VCk|ID0s z80N}7N7F41KYgHC;AN{F1@t-frqL6ib&54@b(aq9F!JWBD!q7Z1KrXcm4>^^Y@P78 z?c@sWh?c0RSK7s)ZFr#BP_aYOF6*NADoJ-))fTE5L7GNEvyN!A@A&m8_+4t~mTi90 z(<%0;=wx4RHjwF;6k4fjcSjtr5mFycjgl0Q;R1C7Og4>Zxi4#Ejz;M8Z7Z1%2Bx4* z!~8=)SMS|F-n-pIB(0Tl2@iiQ(NQ}B%>uOE3RUf04N*d;8e~Ob* z3jtDlukcXx&yX$>5hOrUBUEVb=3PhC-*dbuRZ>!N2G%DbSX1(kG~on`ZzcQ*oE@Aj`}jZW`Jx{2%Y7v7YtT>(;h zsEtJC_C%MCZB2^hvaL3OZs}{6hP#_>O|Zn)1v71Z%Jr{*ahrq)xN9AYma-dM?;)!t zn)boEFn?^7m3V2P;6FG-5BB$vs4ymgCVZP`?X&)eXxEdRJ}>!j&)T-$f~XCGVky z0)De4sy9ad-B!{B>qegw+UA(|Nqsj~pu2iUkcsZq`+>d}>)+&_0(149SK($wP48j4 znd$XEVx<0ifI#2k8os)YbbOm^12^?vr+>@8m>GfZ=0=+Pp57Pr-ME9KjlTKR6tk(h zxYljF+@k{U!L2*Q4!e0L)=kk5OUmV|2c+Q2{%OW$O&k51^cf`Y;=`q$`xX7ujUBof zO6|S9RLW(&PiVyKtG~yXO;?%*T9=zLvTZW1EZjUKMQYYdYG`?n|9L?;-uN@dY@ohL zpl;_Td3fo!7y;X1=p_HXPih56N)+zblJ@$WIZ zYn}I7>-*OCkLy`5gEKp?eeJ#Pn|>$9iKQH>*t9QDwm6OJSAJJ}6_@@3#=2<%fUERI7g3G)z??%4k(l{M+;K>T4Uc*m_L{~p>*`$8XODYm`S^N8n zj@7P*G85X_l@Ru>Lv>2TWqrHDS7*vvR@bR&Fy(?B(By0s^-qi2#_N;_yk3xx)1G&ZDk8-sz%CIET?F z!v3e+e*X^5o1Lw?YI{YE zZtkT(QnDy@xm=?=M z!`&m1U>ZZ$roK3rbb59XqhB1mzcdM#eDro^iGjfT>(M=9Ae}1Jp-ovVLYJ`#qGueg zc4&TWvcoZj(cGt0rA0rv<+b)EcG`z`lkCy&E!%Y>WeL^s|LlG%ebl8^o_eu83G15+ z%x@Mbg2cOJO^MX^h8`?9QO+_+dr_7P+8%q##w;?zIn z<>M|T#?eJpAOK*KC)RxIu<9hxvT2v2Gv=q)=Ttc69Zesn>K8){#5{WWTC+l{#IbyC zx2L7%w-0M1Sw&U>Ohqx5!IIfb$de2+I6cjh*_wo8Gf*mXBNX++!9fl&Ej8Y&Fx(D` zopgLSPqcbZLC*zM9`XmddAiG?DD>$vkO02AW%2e!k_FBr2{c>nLdd%2@+jWiDACYtS2fhIFcAC!@OwEFV9=bRxB)NH6uURq&{TDwqGKHO%U#q2PeJ%ZM{Ufcp>#qXJ z2$S7V9MsakOqvc8D@EaU!kagQRqg8V-G#$PoXq0(@t)U)wJQUYk@EUc8(XsgyzESd?{GF)lc%rQ z2^O_CYWupAk%_BnWV0h{AJe6UcEro``6FVK^oX_RZoeqC#=!c1oo*=+Cf4mZPLVck zL0cD*a1iq@F1eB|11Ey4=NSlFb@0CC#nVbb<*$+M8WT%m9 z!>lFj18Yhy79UJrMyI^YYtNMW$el95K=v(Iuk;uEA) zJf!Y|vcuopXf-9YP-#=gp!5mMos)6tl{-2b=DMvtT2vUf#XyZ2Y1t-WxQ=Ra;D?|V zOmgNj2NSQoWzj{vQXe%EIk>kHiQd?`eol15_#=kkIj`Cg3Gsste86DUe zjb?|YV7TbjaAgwF0tlo)SLd2>&aIZz1evd zQ5SC^NBqe)O-+)t(DA$SB@fd+FSUMRi}i}RilajeNtcUaX-eiW<@md+{mEO>Pxty2 zDuckoa2F8)$Qy{p2@KPFm zGvXEg%D1&_72fR2WwAQbJ)qJHRxc9t*xe~u3;Sv&FFaT5CxHSlvM6fQ8$+HBjuho9 zv8H=vpP!5Sc+-7UWztVj6Ov>v3PEIay+se04 zEZPyL*ol8$s@ZnKB3F9nu{!lnifJJg?f1VsMvvO>{`!uR*xZs|9?pgnrh7--vo^-lZV*xm*+B$D8<8ih?`2VxIbn4a^X7}8fM>I 
zH7t9Ys0x4dydIS}zH2f||E>9EEzzSJ{8AsnIAT3oj4!?DMl#oM^k-b4U1jC`EU%y!22;t5^vei^2t&O;!=m_cfy=7(A}4ZQRQ`JS|nFmkH`K*KwwPG z#6ubk8lHDKjJSlrL)$iTMEmF3Y$hE_KLe*oPM61^FLK>ntlqE^VL0tgOQpPqj5K-J z(r`tYSL95jmnkMpq>5WDWCY?Sqf*nU$9~soZUSu=gFfDd|F*5CjFaApups+E=_HO~ zC@Y!Tl6FtYUS+V~#}&=dU0hHS-GpSKa{1eR)tud@M$?P{9ZR`u>^x*xSlbe`sCJH= zo$1)YnIhe~J!A-}6rRHF8ydo9RrlYxSZ@l_K7dWk7h+45eU-_rWR_qo*t4{XNVtgQ z3H$KD&=6T>?ws!|OM!hL*GeHmjl3crF!rM!FvED)hQL2Xv^~i6NmtPYGqfB5KIZ~9JdRraJWzkZWELXwojZnM@h&Et7OGK+=u{NL zapciN?nS>56ru4^YdA7quiuQ$e=QYR&XGMO#jFo?htT-@`6k%%BtZT{Zq2^vT77l` z%aNXao)sC}(oa2}uG6ZpX^-f(f?eSc-_el^z~!yH6Qdqi^b4QH#4kl z_w-?*K|>HE)GbVG+Fi`W#U1g|q&GWfBJ<^l^;X>i*Q=q0CdVj7m26_a;*r{7B|n>T z$yRd#!ylD-^AklrN$yoT8TE3p`7^B^yxuoEXnIZuPTk1xi#^^B2(_(DZ2q9)!Q9LY zUof3M(@sQ2H!VnlTZczVtTtbE3ijuzOph|UOlu`K_D-P;7-;%~n%_voe9WJ2o<2>YuxvZ>E5(%wy0nGs*glIr)nvHTL(dd81t|er6XIET`N6MA=&{E;-y^*+vliYK~5WA)gKI927O@IA}DY*#_;8 zGea9Z&0=KQl0W$6N_co;C5?TU4WD}u{j~PM z=Dd>qSik%Bx`zvLesDyWI2T4N#dDJFMb{a8^<0);Rb54lXo&G@$zv+jzxtC?{~kZ& z4Z~F&nS()-L^ez}06zgFo1lpK)aP+5dMeWLj#aC=jr0ZS1sMUQN)sgK+o;T;4AMVw z+O@brD>iq?QeZ-lS5dYm+THu!M+AZS1h)z!ehv&qu}mnX8Q<2vQ)|k3p{PD>iMY z=J#yS(=4Vv%?0suBWZs%tz?kk8R)6^3aTv2u>PkvLHu5G>i4&29~(DsNtlq~8FEZ( zJ27L|b9=Uu32X3wd0QFWqpe#8uGqVi6_KJoPj5r>qz?{UBBY^3G9~XWX3f(7#7Ko= zQDB#@{T1~!9ZXSJoPUYrF3~A&zj^Ga=zri-qA5=kO0K*=k9ia0UI`a-=?d9!J!vvS z@xA&R3@TQm0dDU)Y7Je3J98IBPs{orl2o)ATB4Kt{0{z1hjn|vyUeF9p_Sv-Ap3Lg z2cf~YfN)~DpQnagF-K|0T+p;tppOgC$>iKcxq)fiq-Gh8Y*bWKW!vuk^XK=qUWx6r zBn=sOp=$H~)c(MHGYiJo#?d1as~F^u_<4{iBlo_D_0ZhMgW#%PIE$Jue>e1#=a`&w zz?rDY8LQQ_2{pM{fSBx)>9omts`oa~FLI=MA;_kWJ%T zpQL5N!?cnFFz-_Xw#3$Vr?(q10ep`^x6RzmOvCjdpd+74R=Ow7vk~b^Qzhb6zh)cm zi}RU<3SYn~Be;}K?e(^=s&XOa*G-xkJiyIolAU(AZ}OBfUf0XcqCs97P$!9x8LN0@ z_;*Y8(G8jx6^sf^EDL$m##V?mbx>&w`MBu9gM*4oyC~me;6vDYc{mr35Oh}XP`EgD zlgN!;{Sj7{H$RWkiEJB1hk11lFY(K3Awm|v)M_t%zwqMA`BvLsTy9!CrYcF;cUNgc zJeEIu$J-lVGANYC?ZoZCD>e3Ruh}kaAyUVG_s$MW2P*73doLoxgYQLi)On`H9MF_( z!{6J6T`^}H*KpQp5z{)}Od7|HM=??tK%NQ(*0mfjHYRDwSZS$Km>UU_UjR4`8LneW z%}-MJMGa1C8VOnyLd<<~Ns->368xR-hVyXu=y8zO~>$3NISD*D)E+x&ZI8LrGEV-AzG;tZ|vBc%I17<-z<*bdkG- zGb~&+Qb=LcXFp45@ZZ);I7uFxb*h{1<8ue(&bB>qO*Ocu`SD*4+iIuJv3)`vwNh(Zb3B*h=xWnkHkT$D)!!jqOkUj9Y+1UK*hT!Xw0Rg#@-=X5>D`2rx-wSzLYlPoVda0&bK=ySoLSn%^Hx0klsRt>te65juF_jNe+GfM(c6|2^0^h|d9^sU%n};7b`8`RD?q-u z&l}{jwsDi4dyrrj_EKAHdMgrrHn(7y{9@w@o>{4-|1-azuDbWG=KT@FJFN=*@9IL( zc8s#BDJuQUH+5ycbU0(~aGkBuhRX=Rp)U3QUFa1&I|^QUhbOG_q5f`AdaEzl#`2Id zB~&@#YE|StWe~1;iP9AlegEHVn0BE;tcR+<)jQQfT`g73ow>!K1rTnppp@k=_Gt5^ zzU2I;>2To8*cAWpa@=I$n@V61rELXR8#Oqt_UM+)+|z&PzfjJEDrJ_0;@y%K(q=Nf zfI03dCf^*vs!Gro;TP|?p$fcZStXxb5<1k&{D`R;18QmS7q?D)u1B2%)b|<7)m&;dwFu<7FB~xC$R|~uNgASR)rjMC ziIKK?(X1Rb5rL~g8I_pxb|zsJ68~owAgMtioWd(hSW}S9(Ju0V(Hzbwy+fwJ`nXgK z1X7(z&f5M(u+?H4h7q|ps+7)4H62A59@$%79AKLZ&>y%Fylp8R6=-$!5A&si`HGIq zr*Ve8Gnp764|B*uOc*6C;mAvhJxmcFb9WQ3$CeB>(eb}wM#Z%nX!4RBAb>>`aDxeYOKP^Kz-=WSmK zEqmK7TZ<-#-1dX)aitvOu;~up1wOZC$ph=i)>ICk~#rp{-g03QqPz;nV~gRF!i>+U;Jmffy2Ndr1_o9 zm*cBtukVgS!sJM3uXs8V%XYz@@179yOM5%q(x~j+t{5bd1@-3}c3UTh-T?4@Z*(DR zd!d@zk_H>DL=*ngd*}7K>#Sd?MZsl%sAYNSxGk=42_tp`QPdtnukg%=%!#UIUEf17 z#f@ys14U~6u2+Dv;2&VlJJ{d>gBH@^_LDY*#ea9=1(#*-H=oJ{YZ-^BQCByl;h=4@ zh#{}j!qFxZU_jMKi6+khS6k7R3S(O*Ps0*4iHkPZLzo){ghcZ=uk3G@c zSq~1JebHN~gSTuf27)x=)1&#!MlWWlNy5kyN;N`Bg~Pj5{4ANoGIfkVuCF z8DfP@==%ojZ5_f?YmraBU4o}0**5PC#qn@-tWHN#byU72%WlaY_!J{f6_5LiaIpD-9uQitc&(9cFMdr@55?j=dPdcS zOzR=SFr~KU(5B31suL7d^}!=?XqaA)g>vJi6s_+uBma5ZFq8yG2~8V5d(QwRR{MlB-x>ycFn(fv|;P6*$*jU-~I}rhUeT&t* zhDkYc;Yz6Ejera%aK!d2ck(;LWac=22vJO(pdzK`!jzc#b^DeeMVzZ30BjDnEF&r1 
zDa6Iu)gZdj?PT-ZJby#N8O=OOV&)PimWw92Rcw^{3Z?`r&m1?J9CEs#d9MO@O;&-I z6Qn;(CIiv8j*gan-307;(P?{qIL#R7Ytt?wY}biT*lWrSf|NAQ&5B)pf|awVn~~;e zW(l6ZQ-O~#EH)i(r+cX)(ldx)J2oMsdLDTBmCI0iGE;iMa#q0e4 zgm}R7{zr&U3xrn>-8d={=%>3OoU9@?Zu}9D4SHEQv!BJU-k}4QE#jWN{vz|KBl<$W z%w!{aijQPFd0X%9Q=sJ*T&IBSb5L--T(z0N-f zBGC8H0_k?O8e^z<5{F2iDXl+X!zvS+?MQpL9Z4*`B_M%SmN!qxc~WTojgxH{s*1?_ z{C!YIXH8h2zYp-r_tny)O?6xo4hb)!85Vyfe)F*UC|H7_p8yVe{#x%|0YTL`FZidHm?*LRsr_pvgSnY82Y!<6 zwvfG(IZem0H!+C%Y>eyJFQCOTFE#-MI*D@nEBUuB-S@)W1`5eG7QfmhfO1Ie&n z$0A={04+F$?(a_^;W1#q*oBW2Q$@CEKoE?ZA$+kb`>@|1FV!OXchTKYd!p$5lIvY* zsFmBMGB;G%@k2<%D!u%m#~+lM*KgE2sGjZh4>ng0F&GeUJ|+0$ylrPj%*wU-;78R{ zC~DvUv1#0hl7zcxNR;3_DpyIZg4bF^7owi!9Vy{ji6uj~TA^HcCJ*R<4DY>}t=jAT-ILB^X0|G?AE-059@0?F?>!sJMuF;LfYdg) zGC}yvK+*P)v}%=E!k7SnxuZGuPIZrVFiLQBo&={8tP95tr_d~2SThHoV;}o929Xs| zqDI!4mk>?YBfdYSshL@t74j*qNudgxdo_Wx z)$97TQ!SYEvpjG3O0RNI91`Zg*gF4lI=?2;EnbLh`!!piv0f%PnC0z?ggz)8JUbGkXXccH!C9cPu1UDVH|z-vVL;=yW6&fE`>tjUUVRZl{ju` z1O1DogD{_cFU+?-TwUi<=aK`_T@y~GGS#3tmf6LY-%FbD7J^^fARXl`M0*%@mx2SA zBwx#&@+nx0o}<7dUPOUgEATMoQU!>H$&P4O-yM14ReVACQ2j=Jfc3|D@n3Buv=NNr z>Eg*LTahT$gI2{tKBOf(HGgUA*++HLzn&A*`tm@^>n4^|m~g-|z>Gb2HA97xny&sA z>H*>S^7~eLAhGNP>*ox1J)5*8dDo|gGOltwf(*T2`c(BS z;EZQ@Q7ry`!rF$)iJPOiM#u%0u!RW`n=YKAm|fR^@0>tUI+i}4x6B$l*XAQ!Nwn`V zht~?fbQb)Y#gfY`WNKV~u|>yP8u6?*mGG_45{Oy|khc~uCLTUfIpJ56wUM*(CgsD% z?RWCK*(C|X7~N&Psf#+8EN&m?2kKGSXQefWEOZI>%j+A&$>;Tc>p>Mo$*aDJO%swT zW$t0 z@x)3L7R^LSpz>QmN;)0_Z*OjDC_n$K?8jlTQaxsh%*NMSN9_+GD=S~lcNa;%e&w#H ztMhsCMn*xQ6cSpPX?8oK8_3`#^twy;hrf8iP1gCd*XPUC;bI`y>Dg86$)Bu-8FCUr zdT$}Pw6?bP_1e|)NShZLO`WW`zJA-yk!G1rj#CN``peS$uHO%wf4B*(qHo8RS(KzW z{hAV7BE{8Z2RGB8r6SdYKyiPdL0QIT$x~&`jZuW}w=g!P#6TQsf#Z>4R^eIhRKxm9 z!aLTuzFwuIyh6ey6WlG#`thTBE;JRIc;g4w0;*Hs06_$+3 zS<`aLN>Mb_8vZ!$ek&E(dnTVC|D$$xpN_L<&HIX6%?hQt|KK3+h+DD7Q?bs~*X-F9 zlDu;!H_5&>q_9J@m{?f240;wLW|X!j;bLG5r}3zGWJ~(1oDeaa9p%`K^7r-Tp!i=K3yKb&mW zI|A$u4Nj}v--C2sa@->(&@fOKNY+TcH(enCkg+fA276>>K72rgJMH;#24;kBZ*LRN z)>+Ns3?%d8Zl(Dk?oI^rPP+r^1XyI)5j6oBx@*92s)*;4Fqh>5Q8&2{NWAlRrPU6O z=iR|{<9;>sNU57@xZmOTtH{p3;Upuq+MLL!DAXHb92}gSqjgYTJy>sEH)&^^m&g0B z_g+NKQ+BCiJ_rtHr^kK$>?{7b%92MS^c0d5aP|hF8wyYOI~1R?!H$mjRRD?*%^m~z zG{w;{KQ}j79hOj3psU@yc*!($dZ(fKW^=-Hx`hD0vIX#tco*zfuvp;12B!_Y2*=sh z{Keu}2wGvhjLk~vV}d?1{NZb-fB)xFOM`uQt;m<~St&o1FOn#mfY)kFkh*m~bcqms zBwWMeZ*PP4fmWu`7&hu)~XcTU(dwr)cIxRo9)ar5EacxiJ zys5n{`h^0tKBH0cBq1&?{HJ?bc(>^=?QQvv*G)NT3peu{{!=$1(Zbho9`I!kY`r{e zq^#JCtJv9iwKIS7Q&y`G3uc4KDt^04T)u9(rH@cL*fj=_box5$+^+m{{4H|6xUJ?G znZ&ku)wu^Y`2E_W6^up%Rp<(QrVCl~PU=D%|Bf8^-mYNjp_cu6rd#+57iYnym{-yOiaylCi zg~kh4E4{bpJB}@gBkpAlORj^tA|m=!`e?62CiRSdeY@GF#%@mD)vl--n`zgiisJlK z;qB%4KvZR*#x;EJ0-u5SzLa~FmdvE^5%k03VmDmi7F;T_#lgkq~Uv!-LLccPvHuzv0f-r)j}<}>{s zqMxOt`3ZOc>+`{1X`;cUR1;V8vPq8JM)@~T4P%LxaII@j8lRA+2$Km^5fh#vKTgtY zuo_+^U`U7slsFZ5kU;WKWoF*TFYAU;%c3zhHx>ovouR6Jb3hf`;0&w1)BqNz4#;gQ zBLKQzp9Vl*nYuL5O<-6<9sw2A3?`ztMkLi>>_qF6;sePJ=tfy&1Mkm^?NUoFODQp4 zU^Dq@NmysMxxOn%ft@q+qJLPFb43%eV9I~;wpKz}a8XRF3{6le)9T9?U%Bbjl$6kZ zws!FjbCxCVJ*QGgEAc_D_x=dy(9fV{HrLG}dd1{_3T&;UU|xD9PqE#;&AyutsEq;W z$ifO^!sI^R@~L0PB{#RD!ycw3t4)nfiH_a#bJ<01ej<(#cPx8e1KCX0m@ufA0p8yJLCeN|#5J$@?p#4mmJzBKO?201e(rR9v0NkQ=rlY^_9nS^F3sM&>=# zpmEme+tL*js1_n zK5-Y`&9k?!+#R*QM>ziJ_dQqn;0N7qEmIj)W(nq6&SA^oL|E&;zLJw0vy9~Jh37d2 zxD)Q{Jma>92tlQTMMH`ddTy6MY8pPg`376g6&^aeDY4;RpZfr41v_e#L2;^LEL`8$iOi`3Eq7SH=t4_RPVio^y! 
z+hK%K9UPv5o+*rLJzv-P8wKNdDy70`V*5R+eEZEU^mCgPqcVxWL@vMPbt$*~5fT5K zWLpG@M?lc{Gb1lSK0Dom`8+`o=6O%AAHG&o=oY@Zr zl^8m4$oWw?f-37AAkH|VYk^h4SHF=Jmzmld=0pFp$~Q|Sda0l4e!D}L?K(B^=31R9 z3GFo*OLT*^Qm$k4`UV+Q$5kihQYS~%!&23n()wvXd+Qg^_tL7uCDIxw*BVJA<$RZ0 zaXX1M0M>Ad_~RS@Rpaqx<8Nu1yGntP$%L7YAw%6{9xaNC)I~S6-UeCG)&=+Kr@v}c z5XpfYFx^c$v?@>vWRgzRrUS7OBnUno+Ar7UV+UiZd-SyXT<^oWNAmZJ)$^v9g7wdJ z+0@w!RtI&!Sb$E?PnMfP5mupgy}!k>x&qQYcXqXZPO4Q9_W&z|LY61eO#mLIfVM*Y z8M_mtya6&C8th3w;|E~@c@!RS!9=kZOU$l0EPL(KRTdgOdZC_aw)h&Ya3x5#OhGr! znI+R>c0+4*G>{%#%JLgQE~ON^?CGHa#A!AVh4+VAiKYlNd4A3X+gn0)mpISa<659`zkPS>y_!NOv|!Uxsw$?I&cjml&P zxrACrir&6c7F~P!l0b}vn1D{D_GtBpG4}mVbT>90DNH@{ZYag}OP;xa^TLDqOeOXy zvMMxW3rkL+71oObY>u2Yo}V(0W*N~7aad5z)j`=WJY^`Dn2MRo>ldmXv)BAeLHqp! zG6?~{f_}<6b(^(s1Q^}?>@WTtGEvaSj`-M~Z&qjp^edCMwtKQ^D=9aAV zsMAWlWA6F=^P+V|Z~bfzwtGs{%K?k(GDY(%-$Go3?4Y<>Yl0sa6T{Pkvl=s@FL8b& zy?h7DReIVY_k6fWkX4fVRTT4h4GdUORfj}X01<*Qu6xOmP)DJ5`?*Ie^BOb{v?mmp zR=*f4_**1TB)2M#Aut=KQd{^IJPLPBgw!egv}{Gau)iJoy3No&p5gPjQ;_)Xgp$i* zY~p1Ja1GR#4hr08Vmjy2cVBbarW=pRbpQFAd=5m+V^V3^*2N{Sh=h}H zTrpj=X+4+kh|R>m?*8soe94tWNI-yuC$>IBsL;tbSn0Ogu%>(dd!OpDbi=o^uBK>w zpk?40kl}NSh$-i9V`q1)LJi~AaZS@(YqQ4%kmiF#bh^&C);vAA*B7d6Zith5EvD-(fs`mSW(ggLHe76P*wfFKsCJa=f5et4?wCW9~Ea>X#}(o0qvqL zdu}033WSMU%uIbjieO<2p;T!|r2aVDK-Nl_k zFdbnNkU&W9zAktLT5dn`-(cP!KQ!>koLetv<>aR5+gc)J zz#M$kYFr-RPOhgXGSO5{{=-60zJLrzGGL~oc+a6BV~agiK%mGfG;o5 zBmwyHE+O&Ey(TeUgE2gxRxTu;vi?6s)>NZ)_C((x=rC?zX{<8<{TNd$A3|OQ-^073w(njs9G`Qv&dyFNU6w`k zST5|EVFyg2%ik09J(oJl0aC$wcV9XAIy0E3^)`7A8U1ec(cb92`7zdwz@+51_xF|qCon3bA6MjqezrjOw025HNf!!Mglsr?i(c)t@^u*<-g`* zIJDI`8Y~y3cSqRb8CE~8kYiCyV|LYB&*wf`lc!Q+*Ab6;EVsH~jD&M;S^6K|yPX~X zg|e>=jdocSaBnyOH$2TBP&0>(OjZGdVOQ5{4F(%mFHB3py@45CY`_%H$K6BJm8637 zj!SFZ_ljI2>Jylus0FXyhWLFCppY8l_o1-J*I*X*X`TR28L^Ss7fDJggcAn)PPz9I zzwjKMIWb>uYkd=1P=_z*SY>0t7Et>_;%o-N?=+p~Th6>f<{sp6-P}_K@S^1tZW*0t zwUy*22u9G2I0f$R$AbA3fM1z0_U{Q801sQx0Wwbr@L-UL81_F&pmYq_WReBH)V1wL zHeyK>s?Vn*1O&5RTz-+JoE*XS=Nky6Kyh^zUNhI%u5twnLz4TJbIZTKfpACRa+brE z>xJUe^)UeRFNG0%>$*5sM`vQEqf;!$TN+hYk}DH|9x3+Wt6y-UhHTMRt<`K_uH@93 zfYa{F6g{8Y+9LBQmKRDX+?)>;pQ}g(>utMF=u;`83#0Q*4}F-msDyS@dhMggveZd?9F@k7;yK1lyxI^x`J__}p*rvVUNNFA~@3 zeWVKXkQZJ&{vKV+9M6tB(xUlyRuKjb=C3g&eyC-u0xds1>IT=9XuX-Qi?G{bgN)Q# zaGnX)J!PRC5mxGc8B=5)xC9Q#-n^Vv7^^h3WlJX|h>UpkEzh}}w`+?-e0qG7<{%IB z3{o7}KKX?Ju2=Us0Q}PyX- zcZ-PlI6T}x%=xY%8@i3*FO3CV1}8CFx`A@)P(s4v-mVo2$GuHROS7gZ^^QA`I8f`y zyVjh5g*D-7M> zRSsyyL@0qVLD)idOq1!8Jn;3D(f*tE27vb!Hft^@0%-E=HEIftSw3g+LF(G>I{=(r zh-Ac*15H(S*V$GhnrGgA!<`R7PA;wP+nA90T?jfC`kSS)BpxV`Z#}eN<(BP>vs2``mohJVQS?5dnOyb-3Vp5Js&eJQ`pW z$bcR&S!Riv0=1JP*mu(7tctw`OU~&VHqN#e)_@^7b$?Qg*|1GfYxAoM60UberOYAT z^{Jz1ujB_A|MZVv8FiKG!9JebBmzD4Np0EY!j3AyD1_^!g?{rsTSwb6z_}k$>&T*z z0_bPjzRWdu5|B<^vW$7;6;KfGd5O>hUY((14N$uTXXt+AE{Kabg?D`ecr43tfo!iN zMtB)XtFn?+XAEN_H$VA-@p__j~>|F1qiSe=) zp!<{!|K?Q~uM%wiv>Sj>*f1%H%GiV)IP1mjs+|~cpILQa^~LI zcm~Mz`Q#n4UB2Xl=lPU0Hv4cvfbXyJZ$ZGXi~@c|;DD-Blnp#+;DrUM`7r@jtZULM zbc_swV8W6ElYsux`#|h%z-T8e;@MH0%xwDpvOk#^{;DVC3dQ*SbUXFoo1`#MTDmYt z#cBDoGTxZ8omDF*zRKcYVkxU+xsr|vOg~8dG z#MHnuL77#5(_c7gdr*dk&)y(NgOY@^B8nxTf1B|2wO;+_+WI05Fxdb92=53DzI*-v za;!N!2zqrUleHMo0t#P2`BmN}61ikEq#4`;sbWS0+R1C}kS23|daQ<2tnWjwzR?OK zh&2=zqW}2u<5a^J=!tZ)`xG>Md=U`|b}fC@)1CnFq4BexSnOE!(#0AGB-d#ReNCQS zC5+izHV*KmvT9=?Kd`zA;O$J{&FDZtCb{!MeE?k?+7v1_&FgEkV^J{L1a@>p*){E- zzMKG=ci4C~ih}Mvaf`M0jezrwSqP)Cr!k5VKlTMHWma;;x{1@`+0M z+Mv!&WBff)k{IE9j~AYew8GwgwgLEI|I*fsur}b+A1}$QU7kC;L+TCAK(+-A-5B=He2A?lV!NmB~Xg9fsd^*aXJCq_bNQ;m~O-i__U`~=3X=4(}wz4Z-*Zp z5uU7iI7XXZTmF=ZynTz()v>%x?(OZp*>75t#10r^$@6DgKEp>`%+B`w 
zQ+8nHwPrzyp9wO-1Fj)>xKaFlv#J7CM}}Fu31Zcp=tUBvh3A2$G=S&`Zz~e+2WCq? zD(DwIM-5gUAZmD#Z12MKYIrdfoHq?8(}h^(;=hNrP)$U>{8RZldOMvc0E=8X5Ly*k zVlvnpL%Jl0GG!A>+~h_uH6L%nkv|6>AsAR(-z>>}R8r)$%93n;0IScES|~ z24;U!QT{tdF)b;^%K&~c4OBj#{6K+zHe$Xu@7Cxzm2rnvR)X$SzEXlWgo&B})@bd6 zj-?mMYa~dkOk#Ks&#eDIc+Zpz1fPXwr0m;gp<111g(&cBrAcNgWR}!08-Vu?6`B8p zK$=jGZIl>0d^K0M=NQy&u6gie-A$Vb=7eYPR({BC(<&Sa9MxD{0H>+2{K;Pz+E*U# zP7f2`m+uEIyn|U1g{4G9N~OIM#ZBvXhM~x}|CtL7S*O)(qY>wIAJ01;o~NNtxLr|E zWD-MR$q~!)d7H~~hv)4#8WvC$rez+4|LjrU2uXb|890XD{$OAlklDZeRf%t+>w{1m z1^(ox3hrrk|HA1;cS8h-hA2H>Oo}8ds#$I62YbL^`kF;&R;zl419-;zu?KVKTiBn3 z^)wCnbh$xJppa}T`iqLaJ@r{!e}(AT2&_V|wsE%O(_-#WCJqS1rupa)vc%VEm}?N> zw|P(Lt^6eYwAosF`+w1ELtnjmSCq6Tg^KmR=I^rw{k4GW= z`oWFya}zU~2PZg+88^pX9U*!0#n0QutnFng#W5C6x@qhU1%1PdyH%-4@tNsJDaF;7 z*ObH2{l`RrF!V}0%J0{(CN3?m(WD64iBcd#z(%?uZ>1o{GqLcY;OG0IY_mvlMEGYX z+<*7++W#dF2{0ZdLNmrkF}vpOLDZTU4lG()iqELuV!Qglq;T@yO)6k04;%i2|8Aw> zE1scMNvV~9bq>|&4;7bL$XI3mS+IxMC2<|!jnM@7G}(BeR97)ErH2Oa3j+gHGE&p3%r zjqR^2q#JKTMZKd5j5qtclDUSm+K^33Z~Ox~`b$PF(S*=~p$o(c8|&wMctBoP&N9Nf z^hwh<>_82x;i++budeWU7Xd7OP zCbW~|XH&3aXbV1ynfDb0y;@?tqL$v(S&Xb54gs5+i_u(e)JfYcoHbg>p?DG zQ9H?}*V}Ev^eF9*34V%2!@wC4RhXDcVx8uU^*Z?)&dS*cooF8w-eFZ)c_>k=txqP6 z{2a-fA02YDcjtK2PYB+>r}oo!4CYCDms=hz#5RAP+P#QDj(47M@ zrBD`{xcF<^cg#b?s`o6v&2Ww~2-1aq|GSGeEsnVhfB5p#<}0>C&3JT%5ZdU6<_+(k zYP;U*bPgS{XdmZ`#H^V=-I7s`Hll!FY8bfKIt9$%zJJHG7T*-kuK(fkbb$_AJVLT< z0Mdsc8Qlo&NZH{}^~8iEXt%!Hs2+~ne7;STT)SR9>r!;+q{nHL7M(b+Je4R~?aPvP z4$dde{4y){vyOOV=|AywzOL_6@yqx*{@ALGa%2knme$NIlO1^0FNFyerSIp*mm1P^ z2DXSbXZdw=GuBTZNY6;?%7AkwtTQ-etT;zU^7vd^e09Rj5107-cVbKKDD971La0pS zJ0U-sq~L{?;>!xGwVi))fn@wF&n(g<fec}x&Xn3QEj)$?#eZfzF>gJ|7|+s*xV%iHT_5=rY~L|? zZwbG28uUqU$J}hL+`3Dlwkrp<$na?yFdL+aP4_LGyq*V=>ZS|`0$?U3xnv0xPi4RA zXbxJ02IO7M#!UIjJ;yVDB+H`vs za}>A~hzb6E7tKEKGGbH(MShuDzFK}KV4a%x5zki46jq*7?Eb&ld+VUOwr@=|5Ht`p zxCPf>39iB2Ex3jN!QCNvkl+&B-Q8U`?(XjH(rfd(_w~8`x~uN%cdK5#s{ZesoyD4S zjydETV|;7QX=@<5ZZXJK#ey5Yq`;0=&qikfJk^~K?E1#%?LV-i^uOuoOFlO$nQnJN z0td{GNMj<+cE5|xqMfur7TqHZoBkwYqIPZ+rr%(M^$-XgQU(aK&^b%rmB!_os?kcj zqR)eUfjO0nyR)%soC_oGHld4dmMCO;>vNE!%kqL~_d`UjYy5_73uHhgS?uztVx(5V z?Q+|V%FaW(JCIN{$hzw60n`xv6tKem+w>?g@=*3;CCWALZ+NLuV5OlK^Z4h);>Hnj zx?gw^-Ijh(H0D6Yg1hBt7FKH8C>T~jki8xhdPkaZ?7xyH|3U_w zF#X-up+zK+xf^+qFYX^ixC?|7eWQg_d3U0K7oO1G&P53cnfvBSiumrYYN8VQp<*R*O0RE|Du#T(k~2nGyRf;uQjzhVG9uk9?%IJhPQwn*WUp zmF)S6*Ogtz;F=#gDV=|gs;I(ToZO@urvWxo7?=akT6!q<<4Wcrr{YQQCP3ajL5M~@K7#LOIFjSLL;95+9VHHjP4E(p9emGvITD7uwW zaK*ilC{a%1uKEPiI1g}C4uUhUa$5QZ2YND$UWh;O5QO4QiUJIMX>%3U1FAMxUwgN; zcap$^=@phYK#7~_1swn=>OUpM@d1c{MAL?)I+FSh^sM0%w|uXr?NT|YYtgW;e|fwN z?F|8u!pBZT1c4>x?gnXf8v|Iw!CaKj2w}=mYsEC};M3NK$!wE3EKIT;cZ1!J1Fg54 zKSChgDAt5dS)rX6DoR8zE&$ry6t3HCH?r7BpOC^tPu+f`?O2+&)&cI(M(i2k+7mjU z9uMw^$3Xd^Vd1UOJ>~zUUtaIzAWG#|_bdX3T@S^K@yl_?6+KkfVQdl#7WRhv!6-slg1<)4;5J#Z1_0e48&m_hZ5sIn zI4}W-9xT8{xrEYkpa9at2QxyxraNI{SY<#83k$0?xnc0$@57Q2iFu1CnQ#tTBy+({ z^59uBjmaD)$K`-WmxHiW{z&>6UB~SOFGniT$R8V((EnnD`@PRY>T0pzc3qy=7PyMi z6wJNM56<(#`QreyQ;ggOM(2la5&;am)S4Vt06Ndh2dp$yACNO4Wfb@oV=QK?Fo@jF z2&T;!^#`~&u|C8vKR!J%{S@;|V*43*$A^~-q|0QsHJPqvwX6Fpp2n~%V>+4BH(_YpW_q*;Z6{;+*4$?J8)_kt80TO#Du zw)o43QMIwMU)9d;13lM^R8FtmujeMR*Ox2+i@9<@wslJZzk$+asPZQTFciQS`A-dX zetvN_Bb(0OlwA(sn) zacW&+H_pMyHP4)DiXNCe`v^`-%*v_7ZC{_#&IF#Csfz?_P}^7FL4psz1LVEcRrn9K zpSg^Qd7ri^A?oV^?8dL6;^5c(gJ%iJT;ts&5A6H>sbV8ZCk|jUfH$400Ds#&Rf!+> znkoO2p#JXx2mBP6&fDA*gAWHx5D9EXX>!~l?+7AdZ+0(c%gt_3?QLlfM9g+m z?C0Mkd2Z)d|Ac1{{V!S5M+OEWhL$T-g^6)_Lxx}Zxs5Mxn3*f3WLXp6pV{l9q7r_8 zUeL*~d$>LhYk!h0`mbBwJ1aKxrLd|?#J9Ep2kop^=S9DJ-@f~eDv6CO!6+8NeASni 
[GIT binary patch hunk: base85-encoded binary delta data, not human-readable; content elided]
z|7mWvEC6=qLO-Jb;7holE@=j`9o$LNJ`MW2K=wv8e?1>b(7ZHTFOzB8#%MZzXord_ zNh#EZO(x(^^Zfavt?^|_ZfM8l=Hv}b25Nl$KOP^<%^?P8=D(&U?hm#Z0lS?~S(lcZKiA$o&-s+%i4d=Sq?XBSHun>xl!&Q|n|9?7mF?+*?J$!x7O#CgJU7eD zhSE!&Vd|2JE~dU;C14sjo#E3$&CE7umRk?08Nh#&hcVWIRYb>_Wb>Q~G$^g_l(b<| zzjjOh;@U;^=Z^j-S^ga*?X;M_jav^)+p}2%SrA1(ufA*B2e0FtjnM)Qa39!0RGoE? zgq<$;Q;v?UfeRIK_+!G?OhCnlIY^#gd+ z)v-Tawt0FvFr5pu(4n5E{v1N(rp4kBQ7Kb$YCRV7D+lx=!0@;-Q5Iu;a24}@GK>1g zR@C8qat|B4QXQ{5*zOeiawZa|?n+iLF?4G7YE>nye_Z4VabrixPtQK!SeHJa*#Of+ zoydCH3@2Sd+0(kvWBZ%tL&=JMR;Arv9DT+k61801?Fh~6<4JB|H*A+X#W?6 z_XY4Z|JG}LnYWS_H1^XK3X#(;y&j8cdz{t%nGw98lRTS>d>7=8u5p3kG zBy;Ml=ic%)SMiu1H015hn)Y)KgQeZ3$>ZS@%tujNYnF_!5E85yx+xpO zDKt4n3UiYl;+i=Hx!RR12X`|C-I^F*79D+CA2}{b&>k!6ds(d)9!0K;l}f-WQz))} z|6|%i-T0it925ltLE||HkghxI#_BWT6JI5W^a)t!#eJv(dFjHn1CLKtThp4~<)dG1 z;kX*WB;?roYM*P|5RK$E2L*)=3Pn8~ir$Z1 zmcW_HNUb3%6aGO2+}><^&i5}A=FN)-jEK5)E60+bLy9UXzWnz+^ev2SqPn#-p0m#! zPVHI@9Gs-*hk329Uz_^*Nnhc|8Be6d`7JL$NfoqItnL5E#G&4?kIPXV|Jcnb@qPOg zp8Med&Nz@Tj1v*>{mjHd>8U|V33e6Hx%#H*=x+#e4h&^vi+>U38d>HZH-+bvnN%sM@0JwT^U*T<{i=3G&k=DY14N1|SM(%gfr25&We0;gmXjmg zyQs(ydis@Br&RXxwkeE~k&}#S>l6CsA~y-`fEH!`+Cna^9ncyK<`b&sl=jV2qsu_^OSR4~2~7%KHy>$+x3%ij{h+<-1O{)$b&T+!uCQI)Gn&PQQCyFK z&J~N{n7)zrx3S;Q5_Q!0KXaI^;sPjJVT1jvYG+vj{8xGz(Bq|9?P69?dP~MGw^~4F zd5v-}2hA*uF}4s-G3R63&(Z*WGg&4ugQy`m`W4wU(C<#^FK4=ApTk}XRlWLbZUFZ3 z|Di>bA+Mpfs^h5sKa3I%tZ6 zI6QqSarMdjBCxW`58O2@{F{|R?vE)q#}?8yye@K=1agb&atQPMbrjtC=e| z#iGk>wZK}v!X`}@FAqJ_O(yPFJZG=f8D3$Dx`9zw0aeD7(O}g#beh@|VWvTc3@mir z3(dAClcArWhcSJ7Nf67g$DNh_fn;y(Tm-&C6Jq+V`b_&~^Ird00Y`64sc80jtii4a zj!U^$3RW;tr7U=;(zA-2CJNt3Im|{jw=sUzlv7L*>6~^< zYHE4Ot4l{M`hc!JrsNDN)2|s>cj#^(nJ_W7mTgwKVZSxOoPnWfUy*v!g7vXq7zjxt z0-ndLD=TRyHF9n4XD4T`eJ@aBL&h3^a$D31i=9re%hNe0^C#j`cecZxff=oD{;|mx zCt!woxjQ@ltCtb76#^lclR%DeILcAK~b zIm)wP>vPh_`7;$DyB8(KDt^ZsYO@mK@(-UU#2GA<#pl8bPT{}sx?Ll4u}?-SEKX(g zLC=nByq0qqQyP>drxj_0`rOt-nBi%DGZd?`pf!=05=+%ti8CJ1xGeE<(2=6VkAxlR zMKe3@;|@J(=iTOt+i$YtF$&t@Uw-=K%JLIfYbDgWb5MUEV&gSaSJ;-mHKCYp6JM5d zRqq3(khhLts*Qi&8=6bW2`L=RI7)fVm(<=g(vf^e757_?J9c_#gl?HfHEtl4hude= zH6J<8-2Ou^UEh-TpL$iY(R7(^PtVl+3xA1%JFG^frDX#$Y(p6D7Co|~^C(pJJJx}F z@!Q))U;p4m-<&zfm~9wgSG!-9kQ0&MS&@qd?bXLUzTbBfbqzNrukQ2of3!}wu_QTa zsCM5bdVlDqGB?%HI5B~bkXVz_MC^V}#18wSTBljiLj{+N5Jd%5i{K@RzFKYr zwZS}2QKie>8^$GTE|XEVjak4rac8d2@2wSR4C>!}oii4>hkeX2cJwtT|3nTO_|(GM z+cLxI_qp3|&9_XLl$+eY%&B#CIPf*8-wF9TEfFbqV6#)l03wtNY?Q_5CQ>jj3Y6DoepA3b>oac3)al-3d zP0{NhGnKq|Bkj$UqT`;Xuo(~IgtI;!Zsm<6i;{PfMb4|WM8nW)(MD%o6t+W9=VyA- zaOa|R@LhV2VRpU=T270tDRzjeBe&}z6%Cl%);w0y&^Yl_{uf@A^dmLLAIXsI_O