From b196083e1af4fa934d1dbffa8245a3de2ae3a40c Mon Sep 17 00:00:00 2001 From: Alejandro Pedraza Date: Tue, 16 Jul 2024 08:27:54 -0500 Subject: [PATCH] Expand tests to account for audit access policy Followup to #12846, branched off alpeb/policy-audit-impl This fixes the policy controller unit and integration tests by accounting for the new Audit default policy and the new accessPolicy field in Server. New integration tests added: - e2e_audit.rs exercising first the audit policy in Server, and then at the namespace level - in admit_server.rs a new test checks invalid accessPolicy values are rejected. - in inbound_api.rs server_with_audit_policy verifies the synthesized audit authorization is returned for a Server with accessPolicy=audit --- .../k8s/index/src/inbound/tests.rs | 11 +- .../k8s/index/src/inbound/tests/annotation.rs | 1 + .../k8s/status/src/tests/routes.rs | 1 + policy-test/src/web.rs | 3 +- policy-test/tests/admit_server.rs | 23 ++++ policy-test/tests/e2e_audit.rs | 123 ++++++++++++++++++ policy-test/tests/e2e_authorization_policy.rs | 16 +-- policy-test/tests/e2e_server_authorization.rs | 8 +- policy-test/tests/inbound_api.rs | 73 ++++++++++- .../tests/inbound_api_external_workload.rs | 1 + .../tests/inbound_http_route_status.rs | 5 + 11 files changed, 246 insertions(+), 19 deletions(-) create mode 100644 policy-test/tests/e2e_audit.rs diff --git a/policy-controller/k8s/index/src/inbound/tests.rs b/policy-controller/k8s/index/src/inbound/tests.rs index d9b835e8e8b41..2232cfde571b8 100644 --- a/policy-controller/k8s/index/src/inbound/tests.rs +++ b/policy-controller/k8s/index/src/inbound/tests.rs @@ -45,7 +45,7 @@ struct TestConfig { _tracing: tracing::subscriber::DefaultGuard, } -const DEFAULTS: [DefaultPolicy; 5] = [ +const DEFAULTS: [DefaultPolicy; 6] = [ DefaultPolicy::Deny, DefaultPolicy::Allow { authenticated_only: true, @@ -63,6 +63,7 @@ const DEFAULTS: [DefaultPolicy; 5] = [ authenticated_only: false, cluster_only: true, }, + DefaultPolicy::Audit, ]; pub fn mk_pod_with_containers( @@ -121,6 +122,7 @@ fn mk_server( port, selector: k8s::policy::server::Selector::Pod(pod_labels.into_iter().collect()), proxy_protocol, + access_policy: None, }, } } @@ -177,6 +179,13 @@ fn mk_default_policy( networks: cluster_nets, }, )), + DefaultPolicy::Audit => Some(( + AuthorizationRef::Default("audit"), + ClientAuthorization { + authentication: ClientAuthentication::Unauthenticated, + networks: all_nets, + }, + )), } .into_iter() .collect() diff --git a/policy-controller/k8s/index/src/inbound/tests/annotation.rs b/policy-controller/k8s/index/src/inbound/tests/annotation.rs index 61a25a26f6767..92f0a604647a3 100644 --- a/policy-controller/k8s/index/src/inbound/tests/annotation.rs +++ b/policy-controller/k8s/index/src/inbound/tests/annotation.rs @@ -109,6 +109,7 @@ fn authenticated_annotated() { authenticated_only: true, }, DefaultPolicy::Deny => DefaultPolicy::Deny, + DefaultPolicy::Audit => DefaultPolicy::Audit, }; InboundServer { reference: ServerRef::Default(policy.as_str()), diff --git a/policy-controller/k8s/status/src/tests/routes.rs b/policy-controller/k8s/status/src/tests/routes.rs index a0d7af02b84ed..420d884215f6f 100644 --- a/policy-controller/k8s/status/src/tests/routes.rs +++ b/policy-controller/k8s/status/src/tests/routes.rs @@ -47,6 +47,7 @@ fn make_server( port, selector: linkerd_k8s_api::server::Selector::Pod(pod_labels.into_iter().collect()), proxy_protocol, + access_policy: None, }, } } diff --git a/policy-test/src/web.rs b/policy-test/src/web.rs index 
6b86bd2470389..3bb0078756ce8 100644 --- a/policy-test/src/web.rs +++ b/policy-test/src/web.rs @@ -33,7 +33,7 @@ pub fn pod(ns: &str) -> k8s::Pod { } } -pub fn server(ns: &str) -> k8s::policy::Server { +pub fn server(ns: &str, access_policy: Option) -> k8s::policy::Server { k8s::policy::Server { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), @@ -46,6 +46,7 @@ pub fn server(ns: &str) -> k8s::policy::Server { )))), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy, }, } } diff --git a/policy-test/tests/admit_server.rs b/policy-test/tests/admit_server.rs index d8dcb34cdf8da..faf67caf63522 100644 --- a/policy-test/tests/admit_server.rs +++ b/policy-test/tests/admit_server.rs @@ -16,6 +16,7 @@ async fn accepts_valid() { selector: Selector::Pod(api::labels::Selector::default()), port: Port::Number(80.try_into().unwrap()), proxy_protocol: None, + access_policy: None, }, }) .await; @@ -34,6 +35,7 @@ async fn accepts_server_updates() { selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))), port: Port::Number(80.try_into().unwrap()), proxy_protocol: None, + access_policy: None, }, }; @@ -60,6 +62,7 @@ async fn rejects_identitical_pod_selector() { selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))), port: Port::Number(80.try_into().unwrap()), proxy_protocol: None, + access_policy: None, }; let api = kube::Api::namespaced(client, &ns); @@ -106,6 +109,7 @@ async fn rejects_all_pods_selected() { selector: Selector::Pod(api::labels::Selector::from_iter(Some(("app", "test")))), port: Port::Number(80.try_into().unwrap()), proxy_protocol: Some(ProxyProtocol::Http2), + access_policy: None, }, }; api.create(&kube::api::PostParams::default(), &test0) @@ -123,6 +127,7 @@ async fn rejects_all_pods_selected() { port: Port::Number(80.try_into().unwrap()), // proxy protocol doesn't factor into the selection proxy_protocol: Some(ProxyProtocol::Http1), + access_policy: None, }, }; api.create(&kube::api::PostParams::default(), &test1) @@ -179,3 +184,21 @@ async fn rejects_invalid_proxy_protocol() { }) .await; } + +#[tokio::test(flavor = "current_thread")] +async fn rejects_invalid_access_policy() { + admission::rejects(|ns| Server { + metadata: api::ObjectMeta { + namespace: Some(ns), + name: Some("test".to_string()), + ..Default::default() + }, + spec: ServerSpec { + selector: Selector::Pod(api::labels::Selector::default()), + port: Port::Number(80.try_into().unwrap()), + proxy_protocol: None, + access_policy: Some("foobar".to_string()), + }, + }) + .await; +} diff --git a/policy-test/tests/e2e_audit.rs b/policy-test/tests/e2e_audit.rs new file mode 100644 index 0000000000000..0fd25433e050e --- /dev/null +++ b/policy-test/tests/e2e_audit.rs @@ -0,0 +1,123 @@ +use kube::{Client, ResourceExt}; +use linkerd_policy_controller_k8s_api as k8s; +use linkerd_policy_test::{create, create_ready_pod, curl, web, with_temp_ns, LinkerdInject}; + +#[tokio::test(flavor = "current_thread")] +async fn server_audit() { + with_temp_ns(|client, ns| async move { + // Create a server with no policy that should block traffic to the associated pod + let srv = create(&client, web::server(&ns, None)).await; + + // Create the web pod and wait for it to be ready. 
+        tokio::join!(
+            create(&client, web::service(&ns)),
+            create_ready_pod(&client, web::pod(&ns))
+        );
+
+        // All requests should fail
+        let curl = curl::Runner::init(&client, &ns).await;
+        let (injected, uninjected) = tokio::join!(
+            curl.run("curl-injected", "http://web", LinkerdInject::Enabled),
+            curl.run("curl-uninjected", "http://web", LinkerdInject::Disabled),
+        );
+        let (injected_status, uninjected_status) =
+            tokio::join!(injected.exit_code(), uninjected.exit_code());
+        assert_ne!(injected_status, 0, "injected curl must fail");
+        assert_ne!(uninjected_status, 0, "uninjected curl must fail");
+
+        // Patch the server with accessPolicy audit
+        let patch = serde_json::json!({
+            "spec": {
+                "accessPolicy": "audit",
+            }
+        });
+        let patch = k8s::Patch::Merge(patch);
+        let api = k8s::Api::<k8s::policy::Server>::namespaced(client.clone(), &ns);
+        api.patch(&srv.name_unchecked(), &k8s::PatchParams::default(), &patch)
+            .await
+            .expect("failed to patch server");
+
+        // All requests should succeed
+        let (injected, uninjected) = tokio::join!(
+            curl.run("curl-audit-injected", "http://web", LinkerdInject::Enabled),
+            curl.run(
+                "curl-audit-uninjected",
+                "http://web",
+                LinkerdInject::Disabled
+            ),
+        );
+        let (injected_status, uninjected_status) =
+            tokio::join!(injected.exit_code(), uninjected.exit_code());
+        assert_eq!(injected_status, 0, "injected curl must contact web");
+        assert_eq!(uninjected_status, 0, "uninjected curl must contact web");
+    })
+    .await;
+}
+
+#[tokio::test(flavor = "current_thread")]
+async fn ns_audit() {
+    with_temp_ns(|client, ns| async move {
+        change_access_policy(client.clone(), &ns, "cluster-authenticated").await;
+
+        // Create the web pod and wait for it to be ready.
+        tokio::join!(
+            create(&client, web::service(&ns)),
+            create_ready_pod(&client, web::pod(&ns))
+        );
+
+        // Unmeshed requests should fail
+        let curl = curl::Runner::init(&client, &ns).await;
+        let (injected, uninjected) = tokio::join!(
+            curl.run("curl-injected", "http://web", LinkerdInject::Enabled),
+            curl.run("curl-uninjected", "http://web", LinkerdInject::Disabled),
+        );
+        let (injected_status, uninjected_status) =
+            tokio::join!(injected.exit_code(), uninjected.exit_code());
+        assert_eq!(injected_status, 0, "injected curl must contact web");
+        assert_ne!(uninjected_status, 0, "uninjected curl must fail");
+
+        change_access_policy(client.clone(), &ns, "audit").await;
+
+        // Recreate the pod so it picks up the new default policy
+        let api = kube::Api::<k8s::Pod>::namespaced(client.clone(), &ns);
+        kube::runtime::wait::delete::delete_and_finalize(
+            api,
+            "web",
+            &kube::api::DeleteParams::foreground(),
+        )
+        .await
+        .expect("web pod must be deleted");
+
+        create_ready_pod(&client, web::pod(&ns)).await;
+
+        // All requests should work
+        let (injected, uninjected) = tokio::join!(
+            curl.run("curl-audit-injected", "http://web", LinkerdInject::Enabled),
+            curl.run(
+                "curl-audit-uninjected",
+                "http://web",
+                LinkerdInject::Disabled
+            ),
+        );
+        let (injected_status, uninjected_status) =
+            tokio::join!(injected.exit_code(), uninjected.exit_code());
+        assert_eq!(injected_status, 0, "injected curl must contact web");
+        assert_eq!(uninjected_status, 0, "uninjected curl must contact web");
+    })
+    .await;
+}
+
+async fn change_access_policy(client: Client, ns: &str, policy: &str) {
+    let api = k8s::Api::<k8s::Namespace>::all(client.clone());
+    let patch = serde_json::json!({
+        "metadata": {
+            "annotations": {
+                "config.linkerd.io/default-inbound-policy": policy,
+            }
+        }
+    });
+    let patch = k8s::Patch::Merge(patch);
+    api.patch(ns,
&k8s::PatchParams::default(), &patch) + .await + .expect("failed to patch namespace"); +} diff --git a/policy-test/tests/e2e_authorization_policy.rs b/policy-test/tests/e2e_authorization_policy.rs index 578b9a19e70f7..cf729402122ad 100644 --- a/policy-test/tests/e2e_authorization_policy.rs +++ b/policy-test/tests/e2e_authorization_policy.rs @@ -16,7 +16,7 @@ async fn meshtls() { // // The policy requires that all connections are authenticated with MeshTLS. let (srv, all_mtls) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, all_authenticated(&ns)) ); create( @@ -60,7 +60,7 @@ async fn targets_route() { // // The policy requires that all connections are authenticated with MeshTLS. let (srv, all_mtls) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, all_authenticated(&ns)), ); // Create a route which matches the /allowed path. @@ -171,7 +171,7 @@ async fn targets_namespace() { // // The policy requires that all connections are authenticated with MeshTLS. let (_srv, all_mtls) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, all_authenticated(&ns)) ); create( @@ -220,7 +220,7 @@ async fn meshtls_namespace() { // The policy requires that all connections are authenticated with MeshTLS // and come from service accounts in the given namespace. let (srv, mtls_ns) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, ns_authenticated(&ns)) ); create( @@ -277,7 +277,7 @@ async fn network() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. let (srv, allow_ips) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, allow_ips(&ns, Some(blessed_ip))) ); create( @@ -351,7 +351,7 @@ async fn both() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. let (srv, allow_ips, all_mtls) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create( &client, allow_ips(&ns, vec![blessed_injected_ip, blessed_uninjected_ip]), @@ -451,7 +451,7 @@ async fn either() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. let (srv, allow_ips, all_mtls) = tokio::join!( - create(&client, web::server(&ns)), + create(&client, web::server(&ns, None)), create(&client, allow_ips(&ns, vec![blessed_uninjected_ip])), create(&client, all_authenticated(&ns)) ); @@ -528,7 +528,7 @@ async fn either() { async fn empty_authentications() { with_temp_ns(|client, ns| async move { // Create a policy that does not require any authentications. 
- let srv = create(&client, web::server(&ns)).await; + let srv = create(&client, web::server(&ns, None)).await; create( &client, authz_policy(&ns, "web", LocalTargetRef::from_resource(&srv), None), diff --git a/policy-test/tests/e2e_server_authorization.rs b/policy-test/tests/e2e_server_authorization.rs index 78b9e74f4c412..67b923bf1dd8b 100644 --- a/policy-test/tests/e2e_server_authorization.rs +++ b/policy-test/tests/e2e_server_authorization.rs @@ -9,7 +9,7 @@ use linkerd_policy_test::{ #[tokio::test(flavor = "current_thread")] async fn meshtls() { with_temp_ns(|client, ns| async move { - let srv = create(&client, web::server(&ns)).await; + let srv = create(&client, web::server(&ns, None)).await; create( &client, @@ -66,7 +66,7 @@ async fn network() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. - let srv = create(&client, web::server(&ns)).await; + let srv = create(&client, web::server(&ns, None)).await; create( &client, server_authz( @@ -143,7 +143,7 @@ async fn both() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. - let srv = create(&client, web::server(&ns)).await; + let srv = create(&client, web::server(&ns, None)).await; create( &client, server_authz( @@ -243,7 +243,7 @@ async fn either() { // Once we know the IP of the (blocked) pod, create an web // authorization policy that permits connections from this pod. - let srv = create(&client, web::server(&ns)).await; + let srv = create(&client, web::server(&ns, None)).await; tokio::join!( create( &client, diff --git a/policy-test/tests/inbound_api.rs b/policy-test/tests/inbound_api.rs index 9684f83224b6d..8620f935322f0 100644 --- a/policy-test/tests/inbound_api.rs +++ b/policy-test/tests/inbound_api.rs @@ -32,7 +32,7 @@ async fn server_with_server_authorization() { // Create a server that selects the pod's proxy admin server and ensure // that the update now uses this server, which has no authorizations - let server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await; + let server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await; let config = next_config(&mut rx).await; assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol())); assert_eq!(config.authorizations, vec![]); @@ -144,7 +144,7 @@ async fn server_with_authorization_policy() { // Create a server that selects the pod's proxy admin server and ensure // that the update now uses this server, which has no authorizations - let server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await; + let server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await; let config = next_config(&mut rx).await; assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol())); assert_eq!(config.authorizations, vec![]); @@ -239,6 +239,68 @@ async fn server_with_authorization_policy() { .await; } +#[tokio::test(flavor = "current_thread")] +async fn server_with_audit_policy() { + with_temp_ns(|client, ns| async move { + // Create a pod that does nothing. It's injected with a proxy, so we can + // attach policies to its admin server. 
+ let pod = create_ready_pod(&client, mk_pause(&ns, "pause")).await; + + let mut rx = retry_watch_server(&client, &ns, &pod.name_unchecked()).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + assert_is_default_all_unauthenticated!(config); + assert_protocol_detect!(config); + + // Create a server with audit access policy that selects the pod's proxy admin server and + // ensure that the update now uses this server, and an unauthenticated authorization is + // returned + let server = create( + &client, + mk_admin_server(&ns, "linkerd-admin", Some("audit".to_string())), + ) + .await; + let config = next_config(&mut rx).await; + assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol())); + assert_eq!(config.authorizations.len(), 1); + assert_eq!( + config.authorizations.first().unwrap().labels, + convert_args!(hashmap!( + "group" => "", + "kind" => "default", + "name" => "audit", + )) + ); + assert_eq!( + *config + .authorizations + .first() + .unwrap() + .authentication + .as_ref() + .unwrap(), + grpc::inbound::Authn { + permit: Some(grpc::inbound::authn::Permit::Unauthenticated( + grpc::inbound::authn::PermitUnauthenticated {} + )), + } + ); + assert_eq!( + config.labels, + convert_args!(hashmap!( + "group" => "policy.linkerd.io", + "kind" => "server", + "name" => server.name_unchecked() + )) + ); + }) + .await; +} + #[tokio::test(flavor = "current_thread")] async fn server_with_http_route() { with_temp_ns(|client, ns| async move { @@ -259,7 +321,7 @@ async fn server_with_http_route() { // Create a server that selects the pod's proxy admin server and ensure // that the update now uses this server, which has no authorizations // and no routes. - let _server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await; + let _server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await; let config = next_config(&mut rx).await; assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol())); assert_eq!(config.authorizations, vec![]); @@ -419,7 +481,7 @@ async fn http_routes_ordered_by_creation() { // Create a server that selects the pod's proxy admin server and ensure // that the update now uses this server, which has no authorizations // and no routes. 
- let _server = create(&client, mk_admin_server(&ns, "linkerd-admin")).await; + let _server = create(&client, mk_admin_server(&ns, "linkerd-admin", None)).await; let config = next_config(&mut rx).await; assert_eq!(config.protocol, Some(grpc::defaults::proxy_protocol())); assert_eq!(config.authorizations, vec![]); @@ -622,7 +684,7 @@ fn mk_pause(ns: &str, name: &str) -> k8s::Pod { } } -fn mk_admin_server(ns: &str, name: &str) -> k8s::policy::Server { +fn mk_admin_server(ns: &str, name: &str, access_policy: Option) -> k8s::policy::Server { k8s::policy::Server { metadata: k8s::ObjectMeta { namespace: Some(ns.to_string()), @@ -633,6 +695,7 @@ fn mk_admin_server(ns: &str, name: &str) -> k8s::policy::Server { selector: k8s::policy::server::Selector::Pod(k8s::labels::Selector::default()), port: k8s::policy::server::Port::Number(4191.try_into().unwrap()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy, }, } } diff --git a/policy-test/tests/inbound_api_external_workload.rs b/policy-test/tests/inbound_api_external_workload.rs index d4bb9edaf39c8..c7c573eda1aed 100644 --- a/policy-test/tests/inbound_api_external_workload.rs +++ b/policy-test/tests/inbound_api_external_workload.rs @@ -459,6 +459,7 @@ fn mk_http_server(ns: &str, name: &str) -> k8s::policy::Server { ), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, } } diff --git a/policy-test/tests/inbound_http_route_status.rs b/policy-test/tests/inbound_http_route_status.rs index eaf84a6a06213..51166aeed0c6e 100644 --- a/policy-test/tests/inbound_http_route_status.rs +++ b/policy-test/tests/inbound_http_route_status.rs @@ -22,6 +22,7 @@ async fn inbound_accepted_parent() { )), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, }; let server = create(&client, server).await; @@ -99,6 +100,7 @@ async fn inbound_multiple_parents() { )), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, }; let _server = create(&client, server).await; @@ -147,6 +149,7 @@ async fn inbound_no_parent_ref_patch() { )), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, }; let server = create(&client, server).await; @@ -236,6 +239,7 @@ async fn inbound_accepted_reconcile_no_parent() { )), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, }; create(&client, server).await; @@ -285,6 +289,7 @@ async fn inbound_accepted_reconcile_parent_delete() { )), port: k8s::policy::server::Port::Name("http".to_string()), proxy_protocol: Some(k8s::policy::server::ProxyProtocol::Http1), + access_policy: None, }, }; create(&client, server).await;
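A minimal usage sketch (not part of the patch itself): the updated web::server helper above now takes the access policy as an argument, so a test could also create a Server that starts out in audit mode directly, instead of merge-patching spec.accessPolicy after creation as server_audit does. The helper names and signature are the ones introduced by this patch; the combination shown here is illustrative only.

    // Inside a with_temp_ns test body: create a Server whose accessPolicy is
    // already "audit", rather than patching it after creation.
    let srv = create(&client, web::server(&ns, Some("audit".to_string()))).await;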